Merge pull request #1903 from containers/renovate/github.com-containers-image-v5-5.x

fix(deps): update module github.com/containers/image/v5 to v5.25.0
Miloslav Trmač, 2023-04-05 18:28:43 +02:00 (committed by GitHub)
commit 3ddbfec17c
52 changed files with 940 additions and 389 deletions

go.mod (15 changed lines)

@@ -4,9 +4,9 @@ go 1.18
 require (
 	github.com/containers/common v0.51.2
-	github.com/containers/image/v5 v5.24.3-0.20230401101358-e3437f272920
+	github.com/containers/image/v5 v5.25.0
 	github.com/containers/ocicrypt v1.1.7
-	github.com/containers/storage v1.45.4
+	github.com/containers/storage v1.46.0
 	github.com/docker/distribution v2.8.1+incompatible
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.0-rc2
@@ -24,12 +24,12 @@ require (
 require (
 	github.com/BurntSushi/toml v1.2.1 // indirect
 	github.com/Microsoft/go-winio v0.6.0 // indirect
-	github.com/Microsoft/hcsshim v0.9.7 // indirect
+	github.com/Microsoft/hcsshim v0.9.8 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/containerd/cgroups v1.0.4 // indirect
-	github.com/containerd/stargz-snapshotter/estargz v0.14.1 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
 	github.com/coreos/go-oidc/v3 v3.5.0 // indirect
 	github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 // indirect
@@ -58,7 +58,7 @@ require (
 	github.com/go-playground/validator/v10 v10.12.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/go-containerregistry v0.13.0 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
 	github.com/google/trillian v1.5.1 // indirect
@@ -86,7 +86,7 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
-	github.com/opencontainers/runc v1.1.4 // indirect
+	github.com/opencontainers/runc v1.1.5 // indirect
 	github.com/opencontainers/runtime-spec v1.1.0-rc.1 // indirect
 	github.com/opencontainers/selinux v1.11.0 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
@@ -95,9 +95,10 @@ require (
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/proglottis/gpgme v0.1.3 // indirect
 	github.com/rivo/uniseg v0.4.4 // indirect
+	github.com/rogpeppe/go-internal v1.10.0 // indirect
 	github.com/russross/blackfriday v2.0.0+incompatible // indirect
 	github.com/segmentio/ksuid v1.0.4 // indirect
-	github.com/sigstore/fulcio v1.1.0 // indirect
+	github.com/sigstore/fulcio v1.2.0 // indirect
 	github.com/sigstore/rekor v1.1.0 // indirect
 	github.com/sigstore/sigstore v1.6.0 // indirect
 	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect

go.sum (33 changed lines)

@@ -60,8 +60,8 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2
 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
 github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
 github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
-github.com/Microsoft/hcsshim v0.9.7 h1:mKNHW/Xvv1aFH87Jb6ERDzXTJTLPlmzfZ28VBFD/bfg=
-github.com/Microsoft/hcsshim v0.9.7/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.8 h1:lf7xxK2+Ikbj9sVf2QZsouGjRjEp2STj1yDHgoVtU5k=
+github.com/Microsoft/hcsshim v0.9.8/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
 github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -190,8 +190,8 @@ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFY
 github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
 github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
 github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
-github.com/containerd/stargz-snapshotter/estargz v0.14.1 h1:n9M2GDSWM96pyipFTA0DaU+zdtzi3Iwsnj/rIHr1yFM=
-github.com/containerd/stargz-snapshotter/estargz v0.14.1/go.mod h1:uPtMw6ucGJYwImjhxk/oghZmfElF/841u86wReNggNk=
+github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
+github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -214,8 +214,8 @@ github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHV
 github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
 github.com/containers/common v0.51.2 h1:tJ6Nt+zAC6t8nm8qvlVKNjpp/uh3ane80gyj63BwP0Y=
 github.com/containers/common v0.51.2/go.mod h1:3W2WIdalgQfrsX/T5tjX+6CxgT3ThJVN2G9sNuFjuCM=
-github.com/containers/image/v5 v5.24.3-0.20230401101358-e3437f272920 h1:hycywXvCiW9mISvh9jr2Bv/yei7yz4Epu40EeCWkQR8=
-github.com/containers/image/v5 v5.24.3-0.20230401101358-e3437f272920/go.mod h1:nBodKP9+9IjCTME53bROtIOYDkj9GogrA3Nz2icRWGI=
+github.com/containers/image/v5 v5.25.0 h1:TJ0unmalbU+scd0i3Txap2wjGsAnv06MSCwgn6bsizk=
+github.com/containers/image/v5 v5.25.0/go.mod h1:EKvys0WVlRFkDw26R8y52TuhV9Tfn0yq2luLX6W52Ls=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -223,8 +223,8 @@ github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgU
 github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
 github.com/containers/ocicrypt v1.1.7 h1:thhNr4fu2ltyGz8aMx8u48Ae0Pnbip3ePP9/mzkZ/3U=
 github.com/containers/ocicrypt v1.1.7/go.mod h1:7CAhjcj2H8AYp5YvEie7oVSK2AhBY8NscCYRawuDNtw=
-github.com/containers/storage v1.45.4 h1:49u6l37f/QC2ylG4d9FNS3ERfFKH462jrd7HARf3tfw=
-github.com/containers/storage v1.45.4/go.mod h1:mnFUauIJ9UiIYn2KIVavFz73PH8MUhI/8FCkjB7OX8o=
+github.com/containers/storage v1.46.0 h1:K3Tw/U+ZwmMT/tzX04mh5wnK2PuIdEGS2BGMP7ZYAqw=
+github.com/containers/storage v1.46.0/go.mod h1:AVNewDV1jODy8b4Ia4AgsJ6UFKQSIaOQvQ8S6N4VuH0=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@@ -456,8 +456,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -586,8 +587,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -705,8 +706,8 @@ github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h
 github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
 github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runc v1.1.4 h1:nRCz/8sKg6K6jgYAFLDlXzPeITBZJyX28DBVhWD+5dg=
-github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
+github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs=
+github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
 github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -759,7 +760,7 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
+github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -781,6 +782,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
 github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
 github.com/russross/blackfriday v2.0.0+incompatible h1:cBXrhZNUf9C+La9/YpS+UHpUT8YD6Td9ZMSU9APFcsk=
 github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -797,8 +800,8 @@ github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O
 github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
 github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sigstore/fulcio v1.1.0 h1:mzzJ05Ccu8Y2inyioklNvc8MpzlGHxu8YqNeTm0dHfU=
-github.com/sigstore/fulcio v1.1.0/go.mod h1:zv1ZQTXZbUwQdRwajlQksc34pRas+2aZYpIZoQBNev8=
+github.com/sigstore/fulcio v1.2.0 h1:I4H764cDbryKXkPtasUvo8bcix/7xLvkxWYWNp+JtWI=
+github.com/sigstore/fulcio v1.2.0/go.mod h1:FS7qpBvOEqs0uEh1+hJxzxtJistWN29ybLtAzFNUi0c=
 github.com/sigstore/rekor v1.1.0 h1:9fjPvW0WERE7VPtSSVSTbDLLOsrNx3RtiIeZ4/1tmDI=
 github.com/sigstore/rekor v1.1.0/go.mod h1:jEOGDGPMURBt9WR50N0rO7X8GZzLE3UQT+ln6BKJ/m0=
 github.com/sigstore/sigstore v1.6.0 h1:0fYHVoUlPU3WM8o3U1jT9SI2lqQE68XbG+qWncXaZC8=


@@ -176,7 +176,7 @@ func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bp
 	recompressed, annotations := ic.compressedStream(decompressed, *ic.compressionFormat)
 	// Note: recompressed must be closed on all return paths.
 	stream.reader = recompressed
-	stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
+	stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations.
 		Digest: "",
 		Size:   -1,
 	}
@@ -203,7 +203,7 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp
 	}
 	// Note: s must be closed on all return paths.
 	stream.reader = s
-	stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
+	stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations.
 		Digest: "",
 		Size:   -1,
 	}


@@ -739,9 +739,9 @@ func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.Reuse
 	res := types.BlobInfo{
 		Digest:      reusedBlob.Digest,
 		Size:        reusedBlob.Size,
 		URLs:        nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
-		Annotations: inputInfo.Annotations,
+		Annotations: inputInfo.Annotations, // FIXME: This should remove zstd:chunked annotations (but those annotations being left with incorrect values should not break pulls)
 		MediaType:   inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
 		CompressionOperation: reusedBlob.CompressionOperation,
 		CompressionAlgorithm: reusedBlob.CompressionAlgorithm,
 		CryptoOperation:      inputInfo.CryptoOperation, // Expected to be unset anyway.
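
Editor's note: the FIXME comments in these hunks all concern annotations that describe one specific compressed representation of a blob and become stale once the bytes change. A minimal standalone sketch of the kind of filtering the FIXME asks for, assuming a hypothetical annotation-key prefix (the real zstd:chunked keys live in github.com/containers/storage and may differ):

```go
package main

import (
	"fmt"
	"strings"
)

// zstdChunkedPrefix is an assumed, illustrative namespace for zstd:chunked
// metadata annotations; it is not a quotation of the real key names.
const zstdChunkedPrefix = "io.github.containers.zstd-chunked."

// stripChunkedAnnotations returns a copy of annotations without any
// zstd:chunked metadata entries, leaving all other annotations intact.
func stripChunkedAnnotations(annotations map[string]string) map[string]string {
	out := make(map[string]string, len(annotations))
	for k, v := range annotations {
		if strings.HasPrefix(k, zstdChunkedPrefix) {
			continue // stale after recompression; drop it
		}
		out[k] = v
	}
	return out
}

func main() {
	in := map[string]string{
		zstdChunkedPrefix + "manifest-checksum": "sha256:stale",
		"org.example.keep":                      "unrelated",
	}
	fmt.Println(stripChunkedAnnotations(in)) // only org.example.keep survives
}
```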


@@ -44,21 +44,21 @@ func (c Algorithm) InternalUnstableUndocumentedMIMEQuestionMark() string {
 }

 // AlgorithmCompressor returns the compressor field of algo.
-// This is a function instead of a public method so that it is only callable from by code
+// This is a function instead of a public method so that it is only callable by code
 // that is allowed to import this internal subpackage.
 func AlgorithmCompressor(algo Algorithm) CompressorFunc {
 	return algo.compressor
 }

 // AlgorithmDecompressor returns the decompressor field of algo.
-// This is a function instead of a public method so that it is only callable from by code
+// This is a function instead of a public method so that it is only callable by code
 // that is allowed to import this internal subpackage.
 func AlgorithmDecompressor(algo Algorithm) DecompressorFunc {
 	return algo.decompressor
 }

 // AlgorithmPrefix returns the prefix field of algo.
-// This is a function instead of a public method so that it is only callable from by code
+// This is a function instead of a public method so that it is only callable by code
 // that is allowed to import this internal subpackage.
 func AlgorithmPrefix(algo Algorithm) []byte {
 	return algo.prefix
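
Editor's note: the comments above describe a deliberate visibility trick: a struct's private fields are exposed through package-level functions in an internal package, so only importers of that package can reach them. A compiling sketch of the pattern with assumed names (not the vendored type definitions):

```go
// Package internal sketches the accessor pattern used above.
package internal

// Algorithm keeps its interesting fields unexported, so they are
// invisible through the public API surface.
type Algorithm struct {
	name       string
	compressor func([]byte) []byte
}

// AlgorithmCompressor is a function rather than a method on Algorithm:
// a method would be callable by any holder of an Algorithm value, while a
// function declared here is reachable only by code that is permitted to
// import this internal subpackage (Go forbids importing internal/...
// packages from outside their parent tree).
func AlgorithmCompressor(algo Algorithm) func([]byte) []byte {
	return algo.compressor
}
```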


@@ -33,6 +33,58 @@ func (f *fulcioTrustRoot) validate() error {
 	return nil
 }

+// fulcioIssuerInCertificate returns the OIDC issuer recorded by Fulcio in untrustedCertificate;
+// it fails if the extension is not present in the certificate, or on any inconsistency.
+func fulcioIssuerInCertificate(untrustedCertificate *x509.Certificate) (string, error) {
+	// == Validate the recorded OIDC issuer
+	gotOIDCIssuer1 := false
+	gotOIDCIssuer2 := false
+	var oidcIssuer1, oidcIssuer2 string
+	// certificate.ParseExtensions doesn't reject duplicate extensions, and doesn't detect inconsistencies
+	// between certificate.OIDIssuer and certificate.OIDIssuerV2.
+	// Go 1.19 rejects duplicate extensions universally; but until we can require Go 1.19,
+	// reject duplicates manually.
+	for _, untrustedExt := range untrustedCertificate.Extensions {
+		if untrustedExt.Id.Equal(certificate.OIDIssuer) { //nolint:staticcheck // This is deprecated, but we must continue to accept it.
+			if gotOIDCIssuer1 {
+				// Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions
+				// already in ParseCertificate.
+				return "", internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer v1 extension")
+			}
+			oidcIssuer1 = string(untrustedExt.Value)
+			gotOIDCIssuer1 = true
+		} else if untrustedExt.Id.Equal(certificate.OIDIssuerV2) {
+			if gotOIDCIssuer2 {
+				// Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions
+				// already in ParseCertificate.
+				return "", internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer v2 extension")
+			}
+			rest, err := asn1.Unmarshal(untrustedExt.Value, &oidcIssuer2)
+			if err != nil {
+				return "", internal.NewInvalidSignatureError(fmt.Sprintf("invalid ASN.1 in OIDC issuer v2 extension: %v", err))
+			}
+			if len(rest) != 0 {
+				return "", internal.NewInvalidSignatureError("invalid ASN.1 in OIDC issuer v2 extension, trailing data")
+			}
+			gotOIDCIssuer2 = true
+		}
+	}
+	switch {
+	case gotOIDCIssuer1 && gotOIDCIssuer2:
+		if oidcIssuer1 != oidcIssuer2 {
+			return "", internal.NewInvalidSignatureError(fmt.Sprintf("inconsistent OIDC issuer extension values: v1 %#v, v2 %#v",
+				oidcIssuer1, oidcIssuer2))
+		}
+		return oidcIssuer1, nil
+	case gotOIDCIssuer1:
+		return oidcIssuer1, nil
+	case gotOIDCIssuer2:
+		return oidcIssuer2, nil
+	default:
+		return "", internal.NewInvalidSignatureError("Fulcio certificate is missing the issuer extension")
+	}
+}
+
 func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte) (crypto.PublicKey, error) {
 	// == Verify the certificate is correctly signed
 	var untrustedIntermediatePool *x509.CertPool // = nil
@@ -113,24 +165,9 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time,
 	// make the SCT (and all of Rekor apart from the trusted timestamp) unnecessary.

 	// == Validate the recorded OIDC issuer
-	gotOIDCIssuer := false
-	var oidcIssuer string
-	// certificate.ParseExtensions doesn't reject duplicate extensions.
-	// Go 1.19 rejects duplicate extensions universally; but until we can require Go 1.19,
-	// reject duplicates manually. With Go 1.19, we could call certificate.ParseExtensions again.
-	for _, untrustedExt := range untrustedCertificate.Extensions {
-		if untrustedExt.Id.Equal(certificate.OIDIssuer) {
-			if gotOIDCIssuer {
-				// Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions
-				// already in ParseCertificate.
-				return nil, internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer extension")
-			}
-			oidcIssuer = string(untrustedExt.Value)
-			gotOIDCIssuer = true
-		}
-	}
-	if !gotOIDCIssuer {
-		return nil, internal.NewInvalidSignatureError("Fulcio certificate is missing the issuer extension")
+	oidcIssuer, err := fulcioIssuerInCertificate(untrustedCertificate)
+	if err != nil {
+		return nil, err
 	}
 	if oidcIssuer != f.oidcIssuer {
 		return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected Fulcio OIDC issuer %q", oidcIssuer))


@@ -293,7 +293,7 @@ func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []
 		if nextPhysical >= len(physicalInfos) {
 			return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
 		}
-		res[i] = physicalInfos[nextPhysical]
+		res[i] = physicalInfos[nextPhysical] // FIXME? Should we preserve more data in manifestInfos? Notably the current approach correctly removes zstd:chunked metadata annotations.
 		nextPhysical++
 	}
 }


@@ -6,12 +6,12 @@ const (
 	// VersionMajor is for an API incompatible changes
 	VersionMajor = 5
 	// VersionMinor is for functionality in a backwards-compatible manner
-	VersionMinor = 24
+	VersionMinor = 25
 	// VersionPatch is for backwards-compatible bug fixes
-	VersionPatch = 3
+	VersionPatch = 0

 	// VersionDev indicates development branch. Releases will be empty string.
-	VersionDev = "-dev"
+	VersionDev = ""
 )

 // Version is the specification version that the package types support.


@@ -17,15 +17,15 @@ env:
     ####
     #### Cache-image names to test with (double-quotes around names are critical)
     ###
-    FEDORA_NAME: "fedora-37"    ### 20230120t152650z-f37f36u2204
-    UBUNTU_NAME: "ubuntu-2204"  ### 20230120t152650z-f37f36u2204
+    FEDORA_NAME: "fedora-37"
+    DEBIAN_NAME: "debian-12"

     # GCE project where images live
     IMAGE_PROJECT: "libpod-218412"
     # VM Image built in containers/automation_images
-    IMAGE_SUFFIX: "c20230120t152650z-f37f36u2204"
+    IMAGE_SUFFIX: "c20230330t153101z-f37f36d12"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
-    UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
+    DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"

     ####
     #### Command variables to help avoid duplication
@@ -53,19 +53,31 @@ gce_instance:
     image_name: "${FEDORA_CACHE_IMAGE_NAME}"

-fedora_testing_task: &fedora_testing
-    alias: fedora_testing
-    name: &std_test_name "${OS_NAME} ${TEST_DRIVER}"
+linux_testing: &linux_testing
     depends_on:
        - lint
    only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
    gce_instance:  # Only need to specify differences from defaults (above)
        image_name: "${VM_IMAGE}"
+    # Separate scripts for separate outputs, makes debugging easier.
+    setup_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
+    build_and_test_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/build_and_test.sh |& ${_TIMESTAMP}'
+    always:
+        df_script: '${_DFCMD} || true'
+        rh_audit_log_script: '${_RAUDITCMD} || true'
+        debian_audit_log_script: '${_UAUDITCMD} || true'
+        journal_log_script: '${_JOURNALCMD} || true'
+
+fedora_testing_task: &fedora_testing
+    <<: *linux_testing
+    alias: fedora_testing
+    name: &std_test_name "${OS_NAME} ${TEST_DRIVER}"
     env:
         OS_NAME: "${FEDORA_NAME}"
         VM_IMAGE: "${FEDORA_CACHE_IMAGE_NAME}"

     # Not all $TEST_DRIVER combinations valid for all $VM_IMAGE types.
     matrix: &test_matrix
         - env:
@@ -81,26 +93,16 @@ fedora_testing_task: &fedora_testing
         - env:
             TEST_DRIVER: "btrfs"

-    # Separate scripts for separate outputs, makes debugging easier.
-    setup_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
-    build_and_test_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/build_and_test.sh |& ${_TIMESTAMP}'
-    always:
-        df_script: '${_DFCMD} || true'
-        rh_audit_log_script: '${_RAUDITCMD} || true'
-        ubuntu_audit_log_script: '${_UAUDITCMD} || true'
-        journal_log_script: '${_JOURNALCMD} || true'

-# aufs was dropped between 20.04 and 22.04, can't test it
-ubuntu_testing_task: &ubuntu_testing
-    <<: *fedora_testing
-    alias: ubuntu_testing
+debian_testing_task: &debian_testing
+    <<: *linux_testing
+    alias: debian_testing
     name: *std_test_name
-    only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
     env:
-        OS_NAME: "${UBUNTU_NAME}"
-        VM_IMAGE: "${UBUNTU_CACHE_IMAGE_NAME}"
+        OS_NAME: "${DEBIAN_NAME}"
+        VM_IMAGE: "${DEBIAN_CACHE_IMAGE_NAME}"

-    # Not all $TEST_DRIVER combinations valid for all $VM_IMAGE types.
     matrix:
         - env:
             TEST_DRIVER: "vfs"
@@ -120,7 +122,7 @@ lint_task:
     env:
         CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage"
     container:
-        image: golang:1.17
+        image: golang
     modules_cache:
         fingerprint_script: cat go.sum
         folder: $GOPATH/pkg/mod
@@ -143,7 +145,7 @@ meta_task:
         # Space-separated list of images used by this repository state
         IMGNAMES: |-
             ${FEDORA_CACHE_IMAGE_NAME}
-            ${UBUNTU_CACHE_IMAGE_NAME}
+            ${DEBIAN_CACHE_IMAGE_NAME}
         BUILDID: "${CIRRUS_BUILD_ID}"
         REPOREF: "${CIRRUS_CHANGE_IN_REPO}"
         GCPJSON: ENCRYPTED[244a93fe8b386b48b96f748342bf741350e43805eee81dd04b45093bdf737e540b993fc735df41f131835fa0f9b65826]
@@ -156,7 +158,7 @@ meta_task:

vendor_task:
    container:
-        image: golang:1.17
+        image: golang
     modules_cache:
         fingerprint_script: cat go.sum
         folder: $GOPATH/pkg/mod
@@ -175,7 +177,7 @@ success_task:
     depends_on:
         - lint
         - fedora_testing
-        - ubuntu_testing
+        - debian_testing
         - meta
         - vendor
         - cross


@@ -2,36 +2,70 @@
 run:
   concurrency: 6
   deadline: 5m
+  skip-dirs-use-default: true

 linters:
   enable-all: true
   disable:
+    - cyclop
+    - deadcode
     - dogsled
     - dupl
     - errcheck
+    - errname
+    - errorlint
+    - exhaustive
+    - exhaustivestruct
+    - exhaustruct
+    - forbidigo
+    - forcetypeassert
     - funlen
+    - gci
     - gochecknoglobals
     - gochecknoinits
     - gocognit
     - gocritic
     - gocyclo
+    - godot
     - godox
+    - goerr113
+    - gofumpt
+    - golint
     - gomnd
     - gosec
     - gosimple
     - govet
+    - ifshort
     - ineffassign
+    - interfacer
+    - interfacebloat
+    - ireturn
     - lll
+    - maintidx
     - maligned
     - misspell
+    - musttag
     - nakedret
+    - nestif
+    - nlreturn
+    - nolintlint
+    - nonamedreturns
+    - nosnakecase
+    - paralleltest
     - prealloc
+    - predeclared
+    - rowserrcheck
     - scopelint
     - staticcheck
     - structcheck
     - stylecheck
+    - tagliatelle
+    - testpackage
+    - thelper
     - unconvert
     - unparam
+    - unused
     - varcheck
+    - varnamelen
+    - wastedassign
     - whitespace
+    - wrapcheck
     - wsl


@@ -26,7 +26,7 @@ NATIVETAGS :=
 AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/libsubid_tag.sh)
 BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS)
 GO ?= go
-TESTFLAGS := $(shell go test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race)
+TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race)

 # Go module support: set `-mod=vendor` to use the vendored sources
 ifeq ($(shell $(GO) help mod >/dev/null 2>&1 && echo true), true)
@@ -93,9 +93,9 @@ help: ## this help
 	@awk 'BEGIN {FS = ":.*?## "} /^[a-z A-Z_-]+:.*?## / {gsub(" ",",",$$1);gsub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-21s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)

 vendor-in-container:
-	podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang:1.17 make vendor
+	podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang make vendor

 vendor:
-	$(GO) mod tidy -compat=1.17
+	$(GO) mod tidy
 	$(GO) mod vendor
 	$(GO) mod verify


@@ -1 +1 @@
-1.45.4
+1.46.0


@@ -107,13 +107,13 @@ type rwContainerStore interface {
 	// stopReading releases locks obtained by startReading.
 	stopReading()

-	// Create creates a container that has a specified ID (or generates a
+	// create creates a container that has a specified ID (or generates a
 	// random one if an empty value is supplied) and optional names,
 	// based on the specified image, using the specified layer as its
 	// read-write layer.
 	// The maps in the container's options structure are recorded for the
 	// convenience of the caller, nothing more.
-	Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error)
+	create(id string, names []string, image, layer string, options *ContainerOptions) (*Container, error)

 	// updateNames modifies names associated with a container based on (op, names).
 	updateNames(id string, names []string, op updateNameOperation) error
@@ -411,7 +411,7 @@ func (r *containerStore) GarbageCollect() error {
 	for _, entry := range entries {
 		id := entry.Name()
 		// Does it look like a datadir directory?
-		if !entry.IsDir() || !nameLooksLikeID(id) {
+		if !entry.IsDir() || stringid.ValidateID(id) != nil {
 			continue
 		}
@@ -651,7 +651,10 @@ func (r *containerStore) SetFlag(id string, flag string, value interface{}) erro
 }

 // Requires startWriting.
-func (r *containerStore) Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (container *Container, err error) {
+func (r *containerStore) create(id string, names []string, image, layer string, options *ContainerOptions) (container *Container, err error) {
+	if options == nil {
+		options = &ContainerOptions{}
+	}
 	if id == "" {
 		id = stringid.GenerateRandomID()
 		_, idInUse := r.byid[id]
@@ -663,12 +666,6 @@ func (r *containerStore) create(id string, names []string, image, layer string,
 	if _, idInUse := r.byid[id]; idInUse {
 		return nil, ErrDuplicateID
 	}
-	if options.MountOpts != nil {
-		options.Flags[mountOptsFlag] = append([]string{}, options.MountOpts...)
-	}
-	if options.Volatile {
-		options.Flags[volatileFlag] = true
-	}
 	names = dedupeNames(names)
 	for _, name := range names {
 		if _, nameInUse := r.byname[name]; nameInUse {
@@ -686,7 +683,7 @@ func (r *containerStore) create(id string, names []string, image, layer string,
 		Names:   names,
 		ImageID: image,
 		LayerID: layer,
-		Metadata: metadata,
+		Metadata: options.Metadata,
 		BigDataNames:   []string{},
 		BigDataSizes:   make(map[string]int64),
 		BigDataDigests: make(map[string]digest.Digest),
@@ -696,16 +693,42 @@ func (r *containerStore) create(id string, names []string, image, layer string,
 		GIDMap:        copyIDMap(options.GIDMap),
 		volatileStore: options.Volatile,
 	}
+	if options.MountOpts != nil {
+		container.Flags[mountOptsFlag] = append([]string{}, options.MountOpts...)
+	}
+	if options.Volatile {
+		container.Flags[volatileFlag] = true
+	}
 	r.containers = append(r.containers, container)
-	r.byid[id] = container
-	// This can only fail on duplicate IDs, which shouldn't happen — and in that case the index is already in the desired state anyway.
-	// Implementing recovery from an unlikely and unimportant failure here would be too risky.
+	// This can only fail on duplicate IDs, which shouldn't happen — and in
+	// that case the index is already in the desired state anyway.
+	// Implementing recovery from an unlikely and unimportant failure here
+	// would be too risky.
 	_ = r.idindex.Add(id)
+	r.byid[id] = container
 	r.bylayer[layer] = container
 	for _, name := range names {
 		r.byname[name] = container
 	}
+	defer func() {
+		if err != nil {
+			// now that the in-memory structures know about the new
+			// record, we can use regular Delete() to clean up if
+			// anything breaks from here on out
+			if e := r.Delete(id); e != nil {
+				logrus.Debugf("while cleaning up partially-created container %q we failed to create: %v", id, e)
+			}
+		}
+	}()
 	err = r.saveFor(container)
+	if err != nil {
+		return nil, err
+	}
+	for _, item := range options.BigData {
+		if err = r.SetBigData(id, item.Key, item.Data); err != nil {
+			return nil, err
+		}
+	}
 	container = copyContainer(container)
 	return container, err
 }
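
Editor's note: the reworked create() illustrates a cleanup pattern worth calling out: the record is registered in the in-memory indexes first, and a deferred function then uses the ordinary deletion path to roll everything back if a later step fails. A standalone sketch of that pattern, with a hypothetical store type rather than the containers/storage API:

```go
package main

import (
	"errors"
	"fmt"
)

// store is a hypothetical in-memory registry used only to illustrate the
// "register first, defer cleanup on error" pattern from create() above.
type store struct{ byID map[string]string }

func (s *store) delete(id string) { delete(s.byID, id) }

func (s *store) create(id, payload string, persist func() error) (err error) {
	// Register in the in-memory index first...
	s.byID[id] = payload
	// ...so that, if any later step fails, the ordinary deletion path can
	// undo everything the partial create has already done.
	defer func() {
		if err != nil {
			s.delete(id)
		}
	}()
	return persist() // stands in for saveFor() + SetBigData() in the real code
}

func main() {
	s := &store{byID: map[string]string{}}
	err := s.create("c1", "data", func() error { return errors.New("disk full") })
	fmt.Println(err, len(s.byID)) // "disk full 0": the partial record was cleaned up
}
```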


@@ -239,7 +239,7 @@ func (a *Driver) Status() [][2]string {

 // Metadata not implemented
 func (a *Driver) Metadata(id string) (map[string]string, error) {
-	return nil, nil
+	return nil, nil //nolint: nilnil
 }

 // Exists returns true if the given id is registered with


@@ -157,7 +157,7 @@ func (d *Driver) Status() [][2]string {

 // Metadata returns empty metadata for this driver.
 func (d *Driver) Metadata(id string) (map[string]string, error) {
-	return nil, nil
+	return nil, nil //nolint: nilnil
 }

 // Cleanup unmounts the home directory.


@@ -41,7 +41,7 @@ const (
 )

 // CopyRegularToFile copies the content of a file to another
-func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: golint
+func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: revive,golint
 	srcFile, err := os.Open(srcPath)
 	if err != nil {
 		return err
@@ -73,7 +73,7 @@ func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, c
 }

 // CopyRegular copies the content of a file to another
-func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: golint
+func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: revive,golint
 	// If the destination file already exists, we shouldn't blow it away
 	dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode())
 	if err != nil {


@@ -25,7 +25,7 @@ func DirCopy(srcDir, dstDir string, _ Mode, _ bool) error {
 }

 // CopyRegularToFile copies the content of a file to another
-func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive // "func name will be used as copy.CopyRegularToFile by other packages, and that stutters"
+func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive,golint // "func name will be used as copy.CopyRegularToFile by other packages, and that stutters"
 	f, err := os.Open(srcPath)
 	if err != nil {
 		return err
@@ -36,6 +36,6 @@ func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, c
 }

 // CopyRegular copies the content of a file to another
-func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint:revive // "func name will be used as copy.CopyRegular by other packages, and that stutters"
+func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint:revive,golint // "func name will be used as copy.CopyRegular by other packages, and that stutters"
 	return chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, dstPath)
 }


@@ -48,7 +48,7 @@ func validateLVMConfig(cfg directLVMConfig) error {
 func checkDevAvailable(dev string) error {
 	lvmScan, err := exec.LookPath("lvmdiskscan")
 	if err != nil {
-		logrus.Debug("could not find lvmdiskscan")
+		logrus.Debugf("could not find lvmdiskscan: %v", err)
 		return nil
 	}
@@ -67,7 +67,7 @@ func checkDevAvailable(dev string) error {
 func checkDevInVG(dev string) error {
 	pvDisplay, err := exec.LookPath("pvdisplay")
 	if err != nil {
-		logrus.Debug("could not find pvdisplay")
+		logrus.Debugf("could not find pvdisplay: %v", err)
 		return nil
 	}
@@ -96,7 +96,7 @@ func checkDevInVG(dev string) error {
 func checkDevHasFS(dev string) error {
 	blkid, err := exec.LookPath("blkid")
 	if err != nil {
-		logrus.Debug("could not find blkid")
+		logrus.Debugf("could not find blkid %v", err)
 		return nil
 	}


@@ -460,7 +460,7 @@ func (devices *DeviceSet) loadDeviceFilesOnStart() error {

 	var scan = func(path string, d fs.DirEntry, err error) error {
 		if err != nil {
-			logrus.Debugf("devmapper: Can't walk the file %s", path)
+			logrus.Debugf("devmapper: Can't walk the file %s: %v", path, err)
 			return nil
 		}
@@ -2487,10 +2487,11 @@ func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSec
 	var params string
 	_, sizeInSectors, _, params, err = devicemapper.GetStatus(devName)
 	if err != nil {
+		logrus.Debugf("could not find devicemapper status: %v", err)
 		return
 	}
-	if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil {
-		return
+	if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err != nil {
+		logrus.Debugf("could not find scanf devicemapper status: %v", err)
 	}
 	return
 }
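
Editor's note: the second hunk replaces silently-dropped parse results with logged ones. A small runnable sketch of the same fmt.Sscanf idiom; the "%d %d" params format follows the scan in the diff, and fmt is used for output instead of logrus to stay dependency-free:

```go
package main

import "fmt"

// parseStatusParams mirrors deviceStatus above: devicemapper status params
// look like "<mapped> <highest>", and a parse failure is reported rather
// than silently ignored.
func parseStatusParams(params string) (mapped, highest uint64) {
	if _, err := fmt.Sscanf(params, "%d %d", &mapped, &highest); err != nil {
		fmt.Printf("could not parse devicemapper status %q: %v\n", params, err)
	}
	return mapped, highest
}

func main() {
	fmt.Println(parseStatusParams("1024 4096")) // 1024 4096
	fmt.Println(parseStatusParams("garbage"))   // logs the parse error, returns zeros
}
```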


@@ -52,8 +52,8 @@ type MountOpts struct {
 	// Mount label is the MAC Labels to assign to mount point (SELINUX)
 	MountLabel string
 	// UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point
-	UidMaps []idtools.IDMap //nolint: golint,revive
-	GidMaps []idtools.IDMap //nolint: golint
+	UidMaps []idtools.IDMap //nolint: revive,golint
+	GidMaps []idtools.IDMap //nolint: revive,golint
 	Options []string

 	// Volatile specifies whether the container storage can be optimized


@@ -78,7 +78,7 @@ func (c *defaultChecker) IsMounted(path string) bool {
 }

 // Mounted checks if the given path is mounted as the fs type
-//Solaris supports only ZFS for now
+// Solaris supports only ZFS for now
 func Mounted(fsType FsMagic, mountPath string) (bool, error) {

 	cs := C.CString(filepath.Dir(mountPath))


@@ -17,7 +17,6 @@ import (
 	"strings"
 	"sync"
 	"syscall"
-	"unicode"

 	graphdriver "github.com/containers/storage/drivers"
 	"github.com/containers/storage/drivers/overlayutils"
@@ -30,6 +29,7 @@ import (
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/mount"
 	"github.com/containers/storage/pkg/parsers"
+	"github.com/containers/storage/pkg/stringid"
 	"github.com/containers/storage/pkg/system"
 	"github.com/containers/storage/pkg/unshare"
 	units "github.com/docker/go-units"
@@ -314,9 +314,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 	}
 	fsName, ok := graphdriver.FsNames[fsMagic]
 	if !ok {
-		if opts.mountProgram == "" {
-			return nil, fmt.Errorf("filesystem type %#x reported for %s is not supported with 'overlay': %w", fsMagic, filepath.Dir(home), graphdriver.ErrIncompatibleFS)
-		}
 		fsName = "<unknown>"
 	}
 	backingFs = fsName
@@ -549,6 +546,9 @@ func parseOptions(options []string) (*overlayOptions, error) {
 		case "skip_mount_home":
 			logrus.Debugf("overlay: skip_mount_home=%s", val)
 			o.skipMountHome, err = strconv.ParseBool(val)
+			if err != nil {
+				return nil, err
+			}
 		case "ignore_chown_errors":
 			logrus.Debugf("overlay: ignore_chown_errors=%s", val)
 			o.ignoreChownErrors, err = strconv.ParseBool(val)
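
Editor's note: the hunk above stops discarding the strconv.ParseBool error for skip_mount_home. A minimal sketch of the same key=value option-parsing idiom, with an illustrative option set rather than the overlay driver's real one:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseDriverOption mirrors the parseOptions idiom above: each key=val pair
// is parsed, and a malformed boolean is reported instead of being ignored.
func parseDriverOption(option string) (skipMountHome bool, err error) {
	key, val, ok := strings.Cut(option, "=")
	if !ok {
		return false, fmt.Errorf("malformed option %q", option)
	}
	switch key {
	case "skip_mount_home":
		skipMountHome, err = strconv.ParseBool(val)
		if err != nil {
			return false, err // previously this error was silently dropped
		}
	default:
		return false, fmt.Errorf("unknown option %q", key)
	}
	return skipMountHome, nil
}

func main() {
	fmt.Println(parseDriverOption("skip_mount_home=true"))  // true <nil>
	fmt.Println(parseDriverOption("skip_mount_home=bogus")) // false, ParseBool error
}
```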
@@ -685,8 +685,11 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
 	// Try a test mount in the specific location we're looking at using.
 	mergedDir := filepath.Join(layerDir, "merged")
+	mergedSubdir := filepath.Join(mergedDir, "subdir")
 	lower1Dir := filepath.Join(layerDir, "lower1")
 	lower2Dir := filepath.Join(layerDir, "lower2")
+	lower2Subdir := filepath.Join(lower2Dir, "subdir")
+	lower2SubdirFile := filepath.Join(lower2Subdir, "file")
 	upperDir := filepath.Join(layerDir, "upper")
 	workDir := filepath.Join(layerDir, "work")
 	defer func() {
@@ -700,8 +703,15 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
 	_ = idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID)
 	_ = idtools.MkdirAs(lower1Dir, 0700, rootUID, rootGID)
 	_ = idtools.MkdirAs(lower2Dir, 0700, rootUID, rootGID)
+	_ = idtools.MkdirAs(lower2Subdir, 0700, rootUID, rootGID)
 	_ = idtools.MkdirAs(upperDir, 0700, rootUID, rootGID)
 	_ = idtools.MkdirAs(workDir, 0700, rootUID, rootGID)
+	f, err := os.Create(lower2SubdirFile)
+	if err != nil {
+		logrus.Debugf("Unable to create test file: %v", err)
+		return supportsDType, fmt.Errorf("unable to create test file: %w", err)
+	}
+	f.Close()
 	flags := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", lower1Dir, lower2Dir, upperDir, workDir)
 	if selinux.GetEnabled() &&
 		selinux.SecurityCheckContext(selinuxLabelTest) == nil {
@@ -721,6 +731,10 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
 	if len(flags) < unix.Getpagesize() {
 		err := unix.Mount("overlay", mergedDir, "overlay", 0, flags)
 		if err == nil {
+			if err = os.RemoveAll(mergedSubdir); err != nil {
+				logrus.StandardLogger().Logf(logLevel, "overlay: removing an item from the merged directory failed: %v", err)
+				return supportsDType, fmt.Errorf("kernel returned %v when we tried to delete an item in the merged directory: %w", err, graphdriver.ErrNotSupported)
+			}
 			logrus.Debugf("overlay: test mount with multiple lowers succeeded")
 			return supportsDType, nil
 		}
@ -1427,7 +1441,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
perms = os.FileMode(st2.Mode()) perms = os.FileMode(st2.Mode())
permsKnown = true permsKnown = true
} }
l = lower
break break
} }
lower = "" lower = ""
@ -1509,7 +1522,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
} }
} }
if !disableShifting && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 { if !disableShifting && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 && d.options.mountProgram == "" {
var newAbsDir []string var newAbsDir []string
mappedRoot := filepath.Join(d.home, id, "mapped") mappedRoot := filepath.Join(d.home, id, "mapped")
if err := os.MkdirAll(mappedRoot, 0700); err != nil { if err := os.MkdirAll(mappedRoot, 0700); err != nil {
@ -1706,18 +1719,6 @@ func (d *Driver) Exists(id string) bool {
return err == nil return err == nil
} }
func nameLooksLikeID(name string) bool {
if len(name) != 64 {
return false
}
for _, c := range name {
if !unicode.Is(unicode.ASCII_Hex_Digit, c) {
return false
}
}
return true
}
// List layers (not including additional image stores) // List layers (not including additional image stores)
func (d *Driver) ListLayers() ([]string, error) { func (d *Driver) ListLayers() ([]string, error) {
entries, err := os.ReadDir(d.home) entries, err := os.ReadDir(d.home)
@ -1730,7 +1731,7 @@ func (d *Driver) ListLayers() ([]string, error) {
for _, entry := range entries { for _, entry := range entries {
id := entry.Name() id := entry.Name()
// Does it look like a datadir directory? // Does it look like a datadir directory?
if !entry.IsDir() || !nameLooksLikeID(id) { if !entry.IsDir() || stringid.ValidateID(id) != nil {
continue continue
} }
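The nameLooksLikeID helper removed above is replaced here by the exported stringid.ValidateID check. A minimal runnable sketch of that call, assuming only the import path already used in this diff; the validation rule (a 64-character hex name) mirrors the helper it replaces:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/stringid"
)

func main() {
	// A 64-character hex name passes; anything else returns a non-nil error.
	ok := "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
	fmt.Println(stringid.ValidateID(ok))       // <nil>
	fmt.Println(stringid.ValidateID("latest")) // error
}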
@ -1827,7 +1828,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
idMappings = &idtools.IDMappings{} idMappings = &idtools.IDMappings{}
} }
applyDir := "" var applyDir string
if id == "" { if id == "" {
err := os.MkdirAll(d.getStagingDir(), 0700) err := os.MkdirAll(d.getStagingDir(), 0700)

View File

@ -32,3 +32,7 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
func (q *Control) GetQuota(targetPath string, quota *Quota) error { func (q *Control) GetQuota(targetPath string, quota *Quota) error {
return errors.New("filesystem does not support, or has not enabled quotas") return errors.New("filesystem does not support, or has not enabled quotas")
} }
// ClearQuota removes the map entry in the quotas map for targetPath.
// It does so to prevent the map leaking entries as directories are deleted.
func (q *Control) ClearQuota(targetPath string) {}

View File

@ -8,13 +8,13 @@ import (
"runtime" "runtime"
"strconv" "strconv"
"strings" "strings"
"unicode"
graphdriver "github.com/containers/storage/drivers" graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/system"
"github.com/opencontainers/selinux/go-selinux/label" "github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
@ -266,18 +266,6 @@ func (d *Driver) Exists(id string) bool {
return err == nil return err == nil
} }
func nameLooksLikeID(name string) bool {
if len(name) != 64 {
return false
}
for _, c := range name {
if !unicode.Is(unicode.ASCII_Hex_Digit, c) {
return false
}
}
return true
}
// List layers (not including additional image stores) // List layers (not including additional image stores)
func (d *Driver) ListLayers() ([]string, error) { func (d *Driver) ListLayers() ([]string, error) {
entries, err := os.ReadDir(d.homes[0]) entries, err := os.ReadDir(d.homes[0])
@ -290,7 +278,7 @@ func (d *Driver) ListLayers() ([]string, error) {
for _, entry := range entries { for _, entry := range entries {
id := entry.Name() id := entry.Name()
// Does it look like a datadir directory? // Does it look like a datadir directory?
if !entry.IsDir() || !nameLooksLikeID(id) { if !entry.IsDir() || stringid.ValidateID(id) != nil {
continue continue
} }

View File

@ -137,10 +137,10 @@ type rwImageStore interface {
// stopWriting releases locks obtained by startWriting. // stopWriting releases locks obtained by startWriting.
stopWriting() stopWriting()
// Create creates an image that has a specified ID (or a random one) and // create creates an image that has a specified ID (or a random one) and
// optional names, using the specified layer as its topmost (hopefully // optional names, using the specified layer as its topmost (hopefully
// read-only) layer. That layer can be referenced by multiple images. // read-only) layer. That layer can be referenced by multiple images.
Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error) create(id string, names []string, layer string, options ImageOptions) (*Image, error)
// updateNames modifies names associated with an image based on (op, names). // updateNames modifies names associated with an image based on (op, names).
// The values are expected to be valid normalized // The values are expected to be valid normalized
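The create method above now takes a single ImageOptions value in place of the old positional metadata, created-time, and digest arguments. A within-package sketch of a call under the new signature; the identifiers and values are illustrative, not taken from this commit:

img, err := r.create("", []string{"localhost/example:latest"}, topLayerID, ImageOptions{
	CreationDate: time.Now().UTC(), // if left zero, create() substitutes time.Now().UTC()
	Metadata:     "{}",
})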
@ -414,7 +414,7 @@ func (r *imageStore) GarbageCollect() error {
for _, entry := range entries { for _, entry := range entries {
id := entry.Name() id := entry.Name()
// Does it look like a datadir directory? // Does it look like a datadir directory?
if !entry.IsDir() || !nameLooksLikeID(id) { if !entry.IsDir() || stringid.ValidateID(id) != nil {
continue continue
} }
@ -688,7 +688,7 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
} }
// Requires startWriting. // Requires startWriting.
func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) { func (r *imageStore) create(id string, names []string, layer string, options ImageOptions) (image *Image, err error) {
if !r.lockfile.IsReadWrite() { if !r.lockfile.IsReadWrite() {
return nil, fmt.Errorf("not allowed to create new images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) return nil, fmt.Errorf("not allowed to create new images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
} }
@ -709,30 +709,32 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
return nil, fmt.Errorf("image name %q is already associated with image %q: %w", name, image.ID, ErrDuplicateName) return nil, fmt.Errorf("image name %q is already associated with image %q: %w", name, image.ID, ErrDuplicateName)
} }
} }
if created.IsZero() {
created = time.Now().UTC()
}
image = &Image{ image = &Image{
ID: id, ID: id,
Digest: searchableDigest, Digest: options.Digest,
Digests: nil, Digests: copyDigestSlice(options.Digests),
Names: names, Names: names,
NamesHistory: copyStringSlice(options.NamesHistory),
TopLayer: layer, TopLayer: layer,
Metadata: metadata, Metadata: options.Metadata,
BigDataNames: []string{}, BigDataNames: []string{},
BigDataSizes: make(map[string]int64), BigDataSizes: make(map[string]int64),
BigDataDigests: make(map[string]digest.Digest), BigDataDigests: make(map[string]digest.Digest),
Created: created, Created: options.CreationDate,
Flags: make(map[string]interface{}), Flags: copyStringInterfaceMap(options.Flags),
}
if image.Created.IsZero() {
image.Created = time.Now().UTC()
} }
err = image.recomputeDigests() err = image.recomputeDigests()
if err != nil { if err != nil {
return nil, fmt.Errorf("validating digests for new image: %w", err) return nil, fmt.Errorf("validating digests for new image: %w", err)
} }
r.images = append(r.images, image) r.images = append(r.images, image)
// This can only fail on duplicate IDs, which shouldn't happen — and in that case the index is already in the desired state anyway. // This can only fail on duplicate IDs, which shouldn't happen — and in
// Implementing recovery from an unlikely and unimportant failure here would be too risky. // that case the index is already in the desired state anyway.
// Implementing recovery from an unlikely and unimportant failure here
// would be too risky.
_ = r.idindex.Add(id) _ = r.idindex.Add(id)
r.byid[id] = image r.byid[id] = image
for _, name := range names { for _, name := range names {
@ -742,7 +744,28 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
list := r.bydigest[digest] list := r.bydigest[digest]
r.bydigest[digest] = append(list, image) r.bydigest[digest] = append(list, image)
} }
defer func() {
if err != nil {
// now that the in-memory structures know about the new
// record, we can use regular Delete() to clean up if
// anything breaks from here on out
if e := r.Delete(id); e != nil {
logrus.Debugf("while cleaning up partially-created image %q we failed to create: %v", id, e)
}
}
}()
err = r.Save() err = r.Save()
if err != nil {
return nil, err
}
for _, item := range options.BigData {
if item.Digest == "" {
item.Digest = digest.Canonical.FromBytes(item.Data)
}
if err = r.setBigData(image, item.Key, item.Data, item.Digest); err != nil {
return nil, err
}
}
image = copyImage(image) image = copyImage(image)
return image, err return image, err
} }
@ -965,9 +988,6 @@ func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
// Requires startWriting. // Requires startWriting.
func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error { func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
if key == "" {
return fmt.Errorf("can't set empty name for image big data item: %w", ErrInvalidBigDataName)
}
if !r.lockfile.IsReadWrite() { if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to save data items associated with images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) return fmt.Errorf("not allowed to save data items associated with images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
} }
@ -975,10 +995,7 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func
if !ok { if !ok {
return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
} }
err := os.MkdirAll(r.datadir(image.ID), 0700) var err error
if err != nil {
return err
}
var newDigest digest.Digest var newDigest digest.Digest
if bigDataNameIsManifest(key) { if bigDataNameIsManifest(key) {
if digestManifest == nil { if digestManifest == nil {
@ -990,6 +1007,18 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func
} else { } else {
newDigest = digest.Canonical.FromBytes(data) newDigest = digest.Canonical.FromBytes(data)
} }
return r.setBigData(image, key, data, newDigest)
}
// Requires startWriting.
func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest digest.Digest) error {
if key == "" {
return fmt.Errorf("can't set empty name for image big data item: %w", ErrInvalidBigDataName)
}
err := os.MkdirAll(r.datadir(image.ID), 0700)
if err != nil {
return err
}
err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600) err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
if err == nil { if err == nil {
save := false save := false

View File

@ -112,33 +112,33 @@ type Layer struct {
Created time.Time `json:"created,omitempty"` Created time.Time `json:"created,omitempty"`
// CompressedDigest is the digest of the blob that was last passed to // CompressedDigest is the digest of the blob that was last passed to
// ApplyDiff() or Put(), as it was presented to us. // ApplyDiff() or create(), as it was presented to us.
CompressedDigest digest.Digest `json:"compressed-diff-digest,omitempty"` CompressedDigest digest.Digest `json:"compressed-diff-digest,omitempty"`
// CompressedSize is the length of the blob that was last passed to // CompressedSize is the length of the blob that was last passed to
// ApplyDiff() or Put(), as it was presented to us. If // ApplyDiff() or create(), as it was presented to us. If
// CompressedDigest is not set, this should be treated as if it were an // CompressedDigest is not set, this should be treated as if it were an
// uninitialized value. // uninitialized value.
CompressedSize int64 `json:"compressed-size,omitempty"` CompressedSize int64 `json:"compressed-size,omitempty"`
// UncompressedDigest is the digest of the blob that was last passed to // UncompressedDigest is the digest of the blob that was last passed to
// ApplyDiff() or Put(), after we decompressed it. Often referred to // ApplyDiff() or create(), after we decompressed it. Often referred to
// as a DiffID. // as a DiffID.
UncompressedDigest digest.Digest `json:"diff-digest,omitempty"` UncompressedDigest digest.Digest `json:"diff-digest,omitempty"`
// UncompressedSize is the length of the blob that was last passed to // UncompressedSize is the length of the blob that was last passed to
// ApplyDiff() or Put(), after we decompressed it. If // ApplyDiff() or create(), after we decompressed it. If
// UncompressedDigest is not set, this should be treated as if it were // UncompressedDigest is not set, this should be treated as if it were
// an uninitialized value. // an uninitialized value.
UncompressedSize int64 `json:"diff-size,omitempty"` UncompressedSize int64 `json:"diff-size,omitempty"`
// CompressionType is the type of compression which we detected on the blob // CompressionType is the type of compression which we detected on the blob
// that was last passed to ApplyDiff() or Put(). // that was last passed to ApplyDiff() or create().
CompressionType archive.Compression `json:"compression,omitempty"` CompressionType archive.Compression `json:"compression,omitempty"`
// UIDs and GIDs are lists of UIDs and GIDs used in the layer. This // UIDs and GIDs are lists of UIDs and GIDs used in the layer. This
// field is only populated (i.e., will only contain one or more // field is only populated (i.e., will only contain one or more
// entries) if the layer was created using ApplyDiff() or Put(). // entries) if the layer was created using ApplyDiff() or create().
UIDs []uint32 `json:"uidset,omitempty"` UIDs []uint32 `json:"uidset,omitempty"`
GIDs []uint32 `json:"gidset,omitempty"` GIDs []uint32 `json:"gidset,omitempty"`
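The field comments above distinguish the digest of the blob as transferred from the digest of its decompressed contents (the DiffID). A hedged sketch of computing both in one pass over a gzip-compressed blob, using compress/gzip, io, and github.com/opencontainers/go-digest; blob is any io.Reader over the compressed data, and error handling is elided:

compressedDigester := digest.Canonical.Digester()
zr, _ := gzip.NewReader(io.TeeReader(blob, compressedDigester.Hash()))
uncompressedDigest, _ := digest.Canonical.FromReader(zr) // the DiffID
compressedDigest := compressedDigester.Digest()          // digest of the blob as transferred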
@ -248,20 +248,15 @@ type rwLayerStore interface {
// stopWriting releases locks obtained by startWriting. // stopWriting releases locks obtained by startWriting.
stopWriting() stopWriting()
// Create creates a new layer, optionally giving it a specified ID rather than // create creates a new layer, optionally giving it a specified ID rather than
// a randomly-generated one, either inheriting data from another specified // a randomly-generated one, either inheriting data from another specified
// layer or the empty base layer. The new layer can optionally be given names // layer or the empty base layer. The new layer can optionally be given names
// and have an SELinux label specified for use when mounting it. Some // and have an SELinux label specified for use when mounting it. Some
// underlying drivers can accept a "size" option. At this time, most // underlying drivers can accept a "size" option. At this time, most
// underlying drivers do not themselves distinguish between writeable // underlying drivers do not themselves distinguish between writeable
// and read-only layers. // and read-only layers. Returns the new layer structure and the size of the
Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error) // diff which was applied to its parent to initialize its contents.
create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (*Layer, int64, error)
// CreateWithFlags combines the functions of Create and SetFlag.
CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error)
// Put combines the functions of CreateWithFlags and ApplyDiff.
Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error)
// updateNames modifies names associated with a layer based on (op, names). // updateNames modifies names associated with a layer based on (op, names).
updateNames(id string, names []string, op updateNameOperation) error updateNames(id string, names []string, op updateNameOperation) error
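The removed Create, CreateWithFlags, and Put entry points all collapse into the single create method above. A within-package sketch of the consolidated call, with all identifiers illustrative:

// flags that used to be a separate argument now travel in LayerOptions.Flags;
// pass a nil diff for the old Create behavior, a reader for the old Put behavior:
layer, size, err := r.create(id, parentLayer, names, mountLabel, options,
	&LayerOptions{Flags: flags}, true, diffReader)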
@ -1186,8 +1181,10 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
// TODO: check if necessary fields are filled // TODO: check if necessary fields are filled
r.layers = append(r.layers, layer) r.layers = append(r.layers, layer)
// This can only fail on duplicate IDs, which shouldn't happen — and in that case the index is already in the desired state anyway. // This can only fail on duplicate IDs, which shouldn't happen — and in
// Implementing recovery from an unlikely and unimportant failure here would be too risky. // that case the index is already in the desired state anyway.
// Implementing recovery from an unlikely and unimportant failure here
// would be too risky.
_ = r.idindex.Add(id) _ = r.idindex.Add(id)
r.byid[id] = layer r.byid[id] = layer
for _, name := range names { // names got from the additional layer store won't be used for _, name := range names { // names got from the additional layer store won't be used
@ -1200,8 +1197,8 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID) r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
} }
if err := r.saveFor(layer); err != nil { if err := r.saveFor(layer); err != nil {
if err2 := r.driver.Remove(id); err2 != nil { if e := r.Delete(layer.ID); e != nil {
logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, err2) logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, e)
} }
return nil, err return nil, err
} }
@ -1209,7 +1206,10 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
} }
// Requires startWriting. // Requires startWriting.
func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) { func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (layer *Layer, size int64, err error) {
if moreOptions == nil {
moreOptions = &LayerOptions{}
}
if !r.lockfile.IsReadWrite() { if !r.lockfile.IsReadWrite() {
return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerdir, ErrStoreIsReadOnly) return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
} }
@ -1252,7 +1252,6 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
templateTSdata []byte templateTSdata []byte
) )
if moreOptions.TemplateLayer != "" { if moreOptions.TemplateLayer != "" {
var tserr error
templateLayer, ok := r.lookup(moreOptions.TemplateLayer) templateLayer, ok := r.lookup(moreOptions.TemplateLayer)
if !ok { if !ok {
return nil, -1, ErrLayerUnknown return nil, -1, ErrLayerUnknown
@ -1263,9 +1262,9 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
templateCompressionType = templateLayer.CompressionType templateCompressionType = templateLayer.CompressionType
templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...) templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...)
templateTSdata, tserr = os.ReadFile(r.tspath(templateLayer.ID)) templateTSdata, err = os.ReadFile(r.tspath(templateLayer.ID))
if tserr != nil && !os.IsNotExist(tserr) { if err != nil && !errors.Is(err, os.ErrNotExist) {
return nil, -1, tserr return nil, -1, err
} }
} else { } else {
templateIDMappings = &idtools.IDMappings{} templateIDMappings = &idtools.IDMappings{}
@ -1279,9 +1278,10 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
selinux.ReserveLabel(mountLabel) selinux.ReserveLabel(mountLabel)
} }
// Before actually creating the layer, make a persistent record of it with incompleteFlag, // Before actually creating the layer, make a persistent record of it
// so that future processes have a chance to delete it. // with the incomplete flag set, so that future processes have a chance
layer := &Layer{ // to clean up after it.
layer = &Layer{
ID: id, ID: id,
Parent: parent, Parent: parent,
Names: names, Names: names,
@ -1295,98 +1295,109 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
CompressionType: templateCompressionType, CompressionType: templateCompressionType,
UIDs: templateUIDs, UIDs: templateUIDs,
GIDs: templateGIDs, GIDs: templateGIDs,
Flags: make(map[string]interface{}), Flags: copyStringInterfaceMap(moreOptions.Flags),
UIDMap: copyIDMap(moreOptions.UIDMap), UIDMap: copyIDMap(moreOptions.UIDMap),
GIDMap: copyIDMap(moreOptions.GIDMap), GIDMap: copyIDMap(moreOptions.GIDMap),
BigDataNames: []string{}, BigDataNames: []string{},
volatileStore: moreOptions.Volatile, volatileStore: moreOptions.Volatile,
} }
layer.Flags[incompleteFlag] = true
r.layers = append(r.layers, layer) r.layers = append(r.layers, layer)
// This can only fail if the ID is already missing, which shouldn't happen — and in that case the index is already in the desired state anyway. // This can only fail if the ID is already missing, which shouldn't
// This is on various paths to recover from failures, so this should be robust against partially missing data. // happen — and in that case the index is already in the desired state
// anyway. This is on various paths to recover from failures, so this
// should be robust against partially missing data.
_ = r.idindex.Add(id) _ = r.idindex.Add(id)
r.byid[id] = layer r.byid[id] = layer
for _, name := range names { for _, name := range names {
r.byname[name] = layer r.byname[name] = layer
} }
for flag, value := range flags {
layer.Flags[flag] = value
}
layer.Flags[incompleteFlag] = true
succeeded := false
cleanupFailureContext := "" cleanupFailureContext := ""
defer func() { defer func() {
if !succeeded { if err != nil {
// On any error, try both removing the driver's data as well // now that the in-memory structures know about the new
// as the in-memory layer record. // record, we can use regular Delete() to clean up if
if err2 := r.Delete(layer.ID); err2 != nil { // anything breaks from here on out
if cleanupFailureContext == "" { if cleanupFailureContext == "" {
cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site" cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site"
} }
logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, layer.ID, err2) if e := r.Delete(id); e != nil {
logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, id, e)
} }
} }
}() }()
err := r.saveFor(layer) if err = r.saveFor(layer); err != nil {
if err != nil {
cleanupFailureContext = "saving incomplete layer metadata" cleanupFailureContext = "saving incomplete layer metadata"
return nil, -1, err return nil, -1, err
} }
for _, item := range moreOptions.BigData {
if err = r.setBigData(layer, item.Key, item.Data); err != nil {
cleanupFailureContext = fmt.Sprintf("saving big data item %q", item.Key)
return nil, -1, err
}
}
idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap) idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap)
opts := drivers.CreateOpts{ opts := drivers.CreateOpts{
MountLabel: mountLabel, MountLabel: mountLabel,
StorageOpt: options, StorageOpt: options,
IDMappings: idMappings, IDMappings: idMappings,
} }
if moreOptions.TemplateLayer != "" { if moreOptions.TemplateLayer != "" {
if err := r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil { if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
cleanupFailureContext = "creating a layer from template" cleanupFailureContext = fmt.Sprintf("creating a layer from template layer %q", moreOptions.TemplateLayer)
return nil, -1, fmt.Errorf("creating copy of template layer %q with ID %q: %w", moreOptions.TemplateLayer, id, err) return nil, -1, fmt.Errorf("creating copy of template layer %q with ID %q: %w", moreOptions.TemplateLayer, id, err)
} }
oldMappings = templateIDMappings oldMappings = templateIDMappings
} else { } else {
if writeable { if writeable {
if err := r.driver.CreateReadWrite(id, parent, &opts); err != nil { if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil {
cleanupFailureContext = "creating a read-write layer" cleanupFailureContext = "creating a read-write layer"
return nil, -1, fmt.Errorf("creating read-write layer with ID %q: %w", id, err) return nil, -1, fmt.Errorf("creating read-write layer with ID %q: %w", id, err)
} }
} else { } else {
if err := r.driver.Create(id, parent, &opts); err != nil { if err = r.driver.Create(id, parent, &opts); err != nil {
cleanupFailureContext = "creating a read-only layer" cleanupFailureContext = "creating a read-only layer"
return nil, -1, fmt.Errorf("creating layer with ID %q: %w", id, err) return nil, -1, fmt.Errorf("creating read-only layer with ID %q: %w", id, err)
} }
} }
oldMappings = parentMappings oldMappings = parentMappings
} }
if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) { if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) {
if err := r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil { if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
cleanupFailureContext = "in UpdateLayerIDMap" cleanupFailureContext = "in UpdateLayerIDMap"
return nil, -1, err return nil, -1, err
} }
} }
if len(templateTSdata) > 0 { if len(templateTSdata) > 0 {
if err := os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil { if err = os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil {
cleanupFailureContext = "creating tar-split parent directory for a copy from template" cleanupFailureContext = "creating tar-split parent directory for a copy from template"
return nil, -1, err return nil, -1, err
} }
if err := ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil { if err = ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil {
cleanupFailureContext = "creating a tar-split copy from template" cleanupFailureContext = "creating a tar-split copy from template"
return nil, -1, err return nil, -1, err
} }
} }
var size int64 = -1 size = -1
if diff != nil { if diff != nil {
size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff) if size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff); err != nil {
if err != nil {
cleanupFailureContext = "applying layer diff" cleanupFailureContext = "applying layer diff"
return nil, -1, err return nil, -1, err
} }
} else { } else {
// applyDiffWithOptions in the `diff != nil` case handles this bit for us // applyDiffWithOptions() would have updated r.bycompressedsum
// and r.byuncompressedsum for us, but if we used a template
// layer, we didn't call it, so add the new layer as candidates
// for searches for layers by checksum
if layer.CompressedDigest != "" { if layer.CompressedDigest != "" {
r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID) r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
} }
@ -1394,29 +1405,17 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID) r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
} }
} }
delete(layer.Flags, incompleteFlag) delete(layer.Flags, incompleteFlag)
err = r.saveFor(layer) if err = r.saveFor(layer); err != nil {
if err != nil {
cleanupFailureContext = "saving finished layer metadata" cleanupFailureContext = "saving finished layer metadata"
return nil, -1, err return nil, -1, err
} }
layer = copyLayer(layer) layer = copyLayer(layer)
succeeded = true
return layer, size, err return layer, size, err
} }
// Requires startWriting.
func (r *layerStore) CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) {
layer, _, err = r.Put(id, parent, names, mountLabel, options, moreOptions, writeable, flags, nil)
return layer, err
}
// Requires startWriting.
func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (layer *Layer, err error) {
return r.CreateWithFlags(id, parent, names, mountLabel, options, moreOptions, writeable, nil)
}
// Requires startReading or startWriting. // Requires startReading or startWriting.
func (r *layerStore) Mounted(id string) (int, error) { func (r *layerStore) Mounted(id string) (int, error) {
if !r.lockfile.IsReadWrite() { if !r.lockfile.IsReadWrite() {
@ -1677,9 +1676,6 @@ func (r *layerStore) BigData(id, key string) (io.ReadCloser, error) {
// Requires startWriting. // Requires startWriting.
func (r *layerStore) SetBigData(id, key string, data io.Reader) error { func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
if key == "" {
return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName)
}
if !r.lockfile.IsReadWrite() { if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to save data items associated with layers at %q: %w", r.layerdir, ErrStoreIsReadOnly) return fmt.Errorf("not allowed to save data items associated with layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
} }
@ -1687,6 +1683,13 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
if !ok { if !ok {
return fmt.Errorf("locating layer with ID %q to write bigdata: %w", id, ErrLayerUnknown) return fmt.Errorf("locating layer with ID %q to write bigdata: %w", id, ErrLayerUnknown)
} }
return r.setBigData(layer, key, data)
}
func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error {
if key == "" {
return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName)
}
err := os.MkdirAll(r.datadir(layer.ID), 0700) err := os.MkdirAll(r.datadir(layer.ID), 0700)
if err != nil { if err != nil {
return err return err
@ -1759,7 +1762,9 @@ func (r *layerStore) tspath(id string) string {
// layerHasIncompleteFlag returns true if layer.Flags contains an incompleteFlag set to true // layerHasIncompleteFlag returns true if layer.Flags contains an incompleteFlag set to true
// The caller must hold r.inProcessLock for reading. // The caller must hold r.inProcessLock for reading.
func layerHasIncompleteFlag(layer *Layer) bool { func layerHasIncompleteFlag(layer *Layer) bool {
// layer.Flags[…] is defined to succeed and return ok == false if Flags == nil if layer.Flags == nil {
return false
}
if flagValue, ok := layer.Flags[incompleteFlag]; ok { if flagValue, ok := layer.Flags[incompleteFlag]; ok {
if b, ok := flagValue.(bool); ok && b { if b, ok := flagValue.(bool); ok && b {
return true return true
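The explicit nil check added above is belt-and-braces: as the removed comment noted, indexing a nil Go map is defined to return the zero value with ok == false, so the old single-line lookup was already safe. A stand-alone demonstration of that language rule:

package main

import "fmt"

func main() {
	var flags map[string]interface{} // nil, as on a Layer that never had flags set
	v, ok := flags["incomplete"]     // reading from a nil map is safe
	fmt.Println(v, ok)               // prints: <nil> false
}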
@ -1788,20 +1793,21 @@ func (r *layerStore) deleteInternal(id string) error {
} }
} }
// We never unset incompleteFlag; below, we remove the entire object from r.layers. // We never unset incompleteFlag; below, we remove the entire object from r.layers.
id = layer.ID id = layer.ID
if err := r.driver.Remove(id); err != nil { if err := r.driver.Remove(id); err != nil && !errors.Is(err, os.ErrNotExist) {
return err return err
} }
os.Remove(r.tspath(id)) os.Remove(r.tspath(id))
os.RemoveAll(r.datadir(id)) os.RemoveAll(r.datadir(id))
delete(r.byid, id) delete(r.byid, id)
for _, name := range layer.Names { for _, name := range layer.Names {
delete(r.byname, name) delete(r.byname, name)
} }
// This can only fail if the ID is already missing, which shouldn't happen — and in that case the index is already in the desired state anyway. // This can only fail if the ID is already missing, which shouldn't
// The store's Delete method is used on various paths to recover from failures, so this should be robust against partially missing data. // happen — and in that case the index is already in the desired state
// anyway. The store's Delete method is used on various paths to
// recover from failures, so this should be robust against partially
// missing data.
_ = r.idindex.Delete(id) _ = r.idindex.Delete(id)
mountLabel := layer.MountLabel mountLabel := layer.MountLabel
if layer.MountPoint != "" { if layer.MountPoint != "" {
@ -1835,7 +1841,6 @@ func (r *layerStore) deleteInternal(id string) error {
selinux.ReleaseLabel(mountLabel) selinux.ReleaseLabel(mountLabel)
} }
} }
return nil return nil
} }

View File

@ -53,7 +53,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
} }
// If there are no lower layers, then it can't have been deleted in this layer. // If there are no lower layers, then it can't have been deleted in this layer.
if len(o.rolayers) == 0 { if len(o.rolayers) == 0 {
return nil, nil return nil, nil //nolint: nilnil
} }
// At this point, we have a directory that's opaque. If it appears in one of the lower // At this point, we have a directory that's opaque. If it appears in one of the lower
// layers, then it was newly-created here, so it wasn't also deleted here. // layers, then it was newly-created here, so it wasn't also deleted here.
@ -66,7 +66,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
if statErr == nil { if statErr == nil {
if stat.Mode()&os.ModeCharDevice != 0 { if stat.Mode()&os.ModeCharDevice != 0 {
if isWhiteOut(stat) { if isWhiteOut(stat) {
return nil, nil return nil, nil //nolint: nilnil
} }
} }
// It's not whiteout, so it was there in the older layer, so we need to // It's not whiteout, so it was there in the older layer, so we need to
@ -100,7 +100,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
// original directory wasn't inherited into this layer, // original directory wasn't inherited into this layer,
// so we don't need to emit whiteout for it. // so we don't need to emit whiteout for it.
if isWhiteOut(stat) { if isWhiteOut(stat) {
return nil, nil return nil, nil //nolint: nilnil
} }
} }
} }

View File

@ -30,8 +30,8 @@ type walker struct {
dir2 string dir2 string
root1 *FileInfo root1 *FileInfo
root2 *FileInfo root2 *FileInfo
idmap1 *idtools.IDMappings idmap1 *idtools.IDMappings //nolint:unused
idmap2 *idtools.IDMappings idmap2 *idtools.IDMappings //nolint:unused
} }
// collectFileInfoForChanges returns a complete representation of the trees // collectFileInfoForChanges returns a complete representation of the trees

View File

@ -17,8 +17,8 @@ import (
// Generate("foo.txt", "hello world", "emptyfile") // Generate("foo.txt", "hello world", "emptyfile")
// //
// The above call will return an archive with 2 files: // The above call will return an archive with 2 files:
// * ./foo.txt with content "hello world" // - ./foo.txt with content "hello world"
// * ./empty with empty content // - ./empty with empty content
// //
// FIXME: stream content instead of buffering // FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata // FIXME: specify permissions and other archive metadata

View File

@ -361,7 +361,7 @@ func readMetadataFromCache(bigData io.Reader) (*metadata, error) {
return nil, err return nil, err
} }
if version != cacheVersion { if version != cacheVersion {
return nil, nil return nil, nil //nolint: nilnil
} }
if err := binary.Read(bigData, binary.LittleEndian, &tagLen); err != nil { if err := binary.Read(bigData, binary.LittleEndian, &tagLen); err != nil {
return nil, err return nil, err
@ -398,7 +398,8 @@ func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) {
toc, err := unmarshalToc(manifest) toc, err := unmarshalToc(manifest)
if err != nil { if err != nil {
// ignore errors here. They might be caused by a different manifest format. // ignore errors here. They might be caused by a different manifest format.
return nil, nil logrus.Debugf("could not unmarshal manifest: %v", err)
return nil, nil //nolint: nilnil
} }
var r []*internal.FileMetadata var r []*internal.FileMetadata

View File

@ -3,6 +3,7 @@ package chunked
import ( import (
archivetar "archive/tar" archivetar "archive/tar"
"bytes" "bytes"
"context"
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
@ -149,7 +150,7 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must // readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must
// be specified. // be specified.
// This function uses the io.github.containers.zstd-chunked. annotations when specified. // This function uses the io.github.containers.zstd-chunked. annotations when specified.
func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) { func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) {
footerSize := int64(internal.FooterSizeSupported) footerSize := int64(internal.FooterSizeSupported)
if blobSize <= footerSize { if blobSize <= footerSize {
return nil, 0, errors.New("blob too small") return nil, 0, errors.New("blob too small")
@ -244,7 +245,7 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann
return nil, 0, errors.New("invalid manifest checksum") return nil, 0, errors.New("invalid manifest checksum")
} }
decoder, err := zstd.NewReader(nil) decoder, err := zstd.NewReader(nil) //nolint:contextcheck
if err != nil { if err != nil {
return nil, 0, err return nil, 0, err
} }

View File

@ -147,7 +147,7 @@ func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotat
} }
func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) { func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) {
manifest, tocOffset, err := readZstdChunkedManifest(iss, blobSize, annotations) manifest, tocOffset, err := readZstdChunkedManifest(ctx, iss, blobSize, annotations)
if err != nil { if err != nil {
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
} }
@ -279,6 +279,7 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo
func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
digest, err := digest.Parse(file.Digest) digest, err := digest.Parse(file.Digest)
if err != nil { if err != nil {
logrus.Debugf("could not parse digest: %v", err)
return false, nil, 0, nil return false, nil, 0, nil
} }
payloadLink := digest.Encoded() + ".payload-link" payloadLink := digest.Encoded() + ".payload-link"
@ -297,6 +298,7 @@ func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, di
} }
fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0) fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0)
if err != nil { if err != nil {
logrus.Debugf("could not open sourceFile %s: %v", sourceFile, err)
return false, nil, 0, nil return false, nil, 0, nil
} }
f := os.NewFile(uintptr(fd), "fd") f := os.NewFile(uintptr(fd), "fd")
@ -309,6 +311,7 @@ func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, di
dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks) dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks)
if err != nil { if err != nil {
logrus.Debugf("could not copyFileContent: %v", err)
return false, nil, 0, nil return false, nil, 0, nil
} }
return true, dstFile, written, nil return true, dstFile, written, nil
@ -503,7 +506,7 @@ func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.Fil
hasNoFollow := (flags & unix.O_NOFOLLOW) != 0 hasNoFollow := (flags & unix.O_NOFOLLOW) != 0
fd := -1 var fd int
// If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the // If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the
// last component as the path to openat(). // last component as the path to openat().
if hasNoFollow { if hasNoFollow {
@ -1180,7 +1183,7 @@ func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error {
func checkChownErr(err error, name string, uid, gid int) error { func checkChownErr(err error, name string, uid, gid int) error {
if errors.Is(err, syscall.EINVAL) { if errors.Is(err, syscall.EINVAL) {
return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate: %w", uid, gid, name, err) return fmt.Errorf(`potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run "podman system migrate": %w`, uid, gid, name, err)
} }
return err return err
} }
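The reworded message above sends users to /etc/subuid and /etc/subgid. For reference, each line in those files grants one user a contiguous block of subordinate IDs in the form name:start:count; a typical entry allocating 65536 IDs (values illustrative) looks like:

containers:100000:65536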
@ -1302,10 +1305,13 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
var missingParts []missingPart var missingParts []missingPart
mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries) mergedEntries, totalSize, err := c.mergeTocEntries(c.fileType, toc.Entries)
if err != nil { if err != nil {
return output, err return output, err
} }
output.Size = totalSize
if err := maybeDoIDRemap(mergedEntries, options); err != nil { if err := maybeDoIDRemap(mergedEntries, options); err != nil {
return output, err return output, err
} }
@ -1589,7 +1595,9 @@ func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
return false return false
} }
func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]internal.FileMetadata, error) { func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]internal.FileMetadata, int64, error) {
var totalFilesSize int64
countNextChunks := func(start int) int { countNextChunks := func(start int) int {
count := 0 count := 0
for _, e := range entries[start:] { for _, e := range entries[start:] {
@ -1618,8 +1626,11 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
if mustSkipFile(fileType, e) { if mustSkipFile(fileType, e) {
continue continue
} }
totalFilesSize += e.Size
if e.Type == TypeChunk { if e.Type == TypeChunk {
return nil, fmt.Errorf("chunk type without a regular file") return nil, -1, fmt.Errorf("chunk type without a regular file")
} }
if e.Type == TypeReg { if e.Type == TypeReg {
@ -1652,7 +1663,7 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
lastChunkOffset = mergedEntries[i].Chunks[j].Offset lastChunkOffset = mergedEntries[i].Chunks[j].Offset
} }
} }
return mergedEntries, nil return mergedEntries, totalFilesSize, nil
} }
// validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the // validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the

View File

@ -15,7 +15,7 @@ import (
) )
// Same as DM_DEVICE_* enum values from libdevmapper.h // Same as DM_DEVICE_* enum values from libdevmapper.h
// nolint: deadcode // nolint: unused
const ( const (
deviceCreate TaskType = iota deviceCreate TaskType = iota
deviceReload deviceReload
@ -198,13 +198,6 @@ func (t *Task) setAddNode(addNode AddNodeType) error {
return nil return nil
} }
func (t *Task) setRo() error {
if res := DmTaskSetRo(t.unmanaged); res != 1 {
return ErrTaskSetRo
}
return nil
}
func (t *Task) addTarget(start, size uint64, ttype, params string) error { func (t *Task) addTarget(start, size uint64, ttype, params string) error {
if res := DmTaskAddTarget(t.unmanaged, start, size, if res := DmTaskAddTarget(t.unmanaged, start, size,
ttype, params); res != 1 { ttype, params); res != 1 {
@ -213,7 +206,7 @@ func (t *Task) addTarget(start, size uint64, ttype, params string) error {
return nil return nil
} }
func (t *Task) getDeps() (*Deps, error) { func (t *Task) getDeps() (*Deps, error) { //nolint:unused
var deps *Deps var deps *Deps
if deps = DmTaskGetDeps(t.unmanaged); deps == nil { if deps = DmTaskGetDeps(t.unmanaged); deps == nil {
return nil, ErrTaskGetDeps return nil, ErrTaskGetDeps

View File

@ -39,6 +39,7 @@ func LogInit(logger DevmapperLogger) {
// because we are using callbacks, this function will be called for *every* log // because we are using callbacks, this function will be called for *every* log
// in libdm (even debug ones because there's no way of setting the verbosity // in libdm (even debug ones because there's no way of setting the verbosity
// level for an external logging callback). // level for an external logging callback).
//
//export StorageDevmapperLogCallback //export StorageDevmapperLogCallback
func StorageDevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) { func StorageDevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) {
msg := C.GoString(message) msg := C.GoString(message)

View File

@ -165,7 +165,7 @@ func (pm *PatternMatcher) Patterns() []*Pattern {
return pm.patterns return pm.patterns
} }
// Pattern defines a single regexp used used to filter file paths. // Pattern defines a single regexp used to filter file paths.
type Pattern struct { type Pattern struct {
cleanedPattern string cleanedPattern string
dirs []string dirs []string

View File

@ -34,10 +34,10 @@ func openTree(path string, flags int) (fd int, err error) {
if e1 != 0 { if e1 != 0 {
err = e1 err = e1
} }
return int(r), nil return int(r), err
} }
// moveMount is a wrapper for the the move_mount syscall. // moveMount is a wrapper for the move_mount syscall.
func moveMount(fdTree int, target string) (err error) { func moveMount(fdTree int, target string) (err error) {
var _p0, _p1 *byte var _p0, _p1 *byte

View File

@ -362,7 +362,7 @@ func parseSubidFile(path, username string) (ranges, error) {
func checkChownErr(err error, name string, uid, gid int) error { func checkChownErr(err error, name string, uid, gid int) error {
var e *os.PathError var e *os.PathError
if errors.As(err, &e) && e.Err == syscall.EINVAL { if errors.As(err, &e) && e.Err == syscall.EINVAL {
return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate: %w", uid, gid, name, err) return fmt.Errorf(`potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run "podman system migrate": %w`, uid, gid, name, err)
} }
return err return err
} }

View File

@ -24,13 +24,14 @@ func ParseKeyValueOpt(opt string) (string, string, error) {
// input string. It returns a `map[int]bool` with available elements from `val` // input string. It returns a `map[int]bool` with available elements from `val`
// set to `true`. // set to `true`.
// Supported formats: // Supported formats:
// 7 //
// 1-6 // 7
// 0,3-4,7,8-10 // 1-6
// 0-0,0,1-7 // 0,3-4,7,8-10
// 03,1-3 <- this is gonna get parsed as [1,2,3] // 0-0,0,1-7
// 3,2,1 // 03,1-3 <- this is gonna get parsed as [1,2,3]
// 0-2,3,1 // 3,2,1
// 0-2,3,1
func ParseUintList(val string) (map[int]bool, error) { func ParseUintList(val string) (map[int]bool, error) {
if val == "" { if val == "" {
return map[int]bool{}, nil return map[int]bool{}, nil
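The format list in the comment above is exercised directly by ParseUintList. A small runnable example against the package as vendored here:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/parsers"
)

func main() {
	set, err := parsers.ParseUintList("0,3-4,7")
	if err != nil {
		panic(err)
	}
	fmt.Println(set[0], set[3], set[4], set[7]) // true true true true
	fmt.Println(set[5])                         // false: 5 is not in the list
}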

View File

@ -49,7 +49,7 @@ func panicIfNotInitialized() {
} }
} }
func naiveSelf() string { func naiveSelf() string { //nolint: unused
name := os.Args[0] name := os.Args[0]
if filepath.Base(name) == name { if filepath.Base(name) == name {
if lp, err := exec.LookPath(name); err == nil { if lp, err := exec.LookPath(name); err == nil {

View File

@ -7,9 +7,9 @@ import (
"time" "time"
) )
//setCTime will set the create time on a file. On Unix, the create // setCTime will set the create time on a file. On Unix, the create
//time is updated as a side effect of setting the modified time, so // time is updated as a side effect of setting the modified time, so
//no action is required. // no action is required.
func setCTime(path string, ctime time.Time) error { func setCTime(path string, ctime time.Time) error {
return nil return nil
} }

View File

@ -9,8 +9,8 @@ import (
"golang.org/x/sys/windows" "golang.org/x/sys/windows"
) )
//setCTime will set the create time on a file. On Windows, this requires // setCTime will set the create time on a file. On Windows, this requires
//calling SetFileTime and explicitly including the create time. // calling SetFileTime and explicitly including the create time.
func setCTime(path string, ctime time.Time) error { func setCTime(path string, ctime time.Time) error {
ctimespec := windows.NsecToTimespec(ctime.UnixNano()) ctimespec := windows.NsecToTimespec(ctime.UnixNano())
pathp, e := windows.UTF16PtrFromString(path) pathp, e := windows.UTF16PtrFromString(path)

View File

@ -59,7 +59,8 @@ func getSwapInfo() (int64, int64, error) {
} }
// ReadMemInfo retrieves memory statistics of the host system and returns a // ReadMemInfo retrieves memory statistics of the host system and returns a
// MemInfo type. //
// MemInfo type.
func ReadMemInfo() (*MemInfo, error) { func ReadMemInfo() (*MemInfo, error) {
MemTotal, MemFree, err := getMemInfo() MemTotal, MemFree, err := getMemInfo()
if err != nil { if err != nil {

View File

@ -81,7 +81,8 @@ func getFreeMem() int64 {
} }
// ReadMemInfo retrieves memory statistics of the host system and returns a // ReadMemInfo retrieves memory statistics of the host system and returns a
// MemInfo type. //
// MemInfo type.
func ReadMemInfo() (*MemInfo, error) { func ReadMemInfo() (*MemInfo, error) {
ppKernel := C.getPpKernel() ppKernel := C.getPpKernel()

View File

@ -27,7 +27,8 @@ type memorystatusex struct {
} }
// ReadMemInfo retrieves memory statistics of the host system and returns a // ReadMemInfo retrieves memory statistics of the host system and returns a
// MemInfo type. //
// MemInfo type.
func ReadMemInfo() (*MemInfo, error) { func ReadMemInfo() (*MemInfo, error) {
msi := &memorystatusex{ msi := &memorystatusex{
dwLength: 64, dwLength: 64,

View File

@ -33,9 +33,9 @@ type Cmd struct {
*exec.Cmd *exec.Cmd
UnshareFlags int UnshareFlags int
UseNewuidmap bool UseNewuidmap bool
UidMappings []specs.LinuxIDMapping // nolint: golint UidMappings []specs.LinuxIDMapping // nolint: revive,golint
UseNewgidmap bool UseNewgidmap bool
GidMappings []specs.LinuxIDMapping // nolint: golint GidMappings []specs.LinuxIDMapping // nolint: revive,golint
GidMappingsEnableSetgroups bool GidMappingsEnableSetgroups bool
Setsid bool Setsid bool
Setpgrp bool Setpgrp bool
@ -175,12 +175,11 @@ func (c *Cmd) Start() error {
pidWrite = nil pidWrite = nil
// Read the child's PID from the pipe. // Read the child's PID from the pipe.
pidString := ""
b := new(bytes.Buffer) b := new(bytes.Buffer)
if _, err := io.Copy(b, pidRead); err != nil { if _, err := io.Copy(b, pidRead); err != nil {
return fmt.Errorf("reading child PID: %w", err) return fmt.Errorf("reading child PID: %w", err)
} }
pidString = b.String() pidString := b.String()
pid, err := strconv.Atoi(pidString) pid, err := strconv.Atoi(pidString)
if err != nil { if err != nil {
fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err) fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err)
@ -451,7 +450,7 @@ type Runnable interface {
Run() error Run() error
} }
func bailOnError(err error, format string, a ...interface{}) { // nolint: golint,goprintffuncname func bailOnError(err error, format string, a ...interface{}) { // nolint: revive,goprintffuncname
if err != nil { if err != nil {
if format != "" { if format != "" {
logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err) logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err)

View File

@ -34,6 +34,8 @@ graphroot = "/var/lib/containers/storage"
# Transient store mode makes all container metadata be saved in temporary storage # Transient store mode makes all container metadata be saved in temporary storage
# (i.e. runroot above). This is faster, but doesn't persist across reboots. # (i.e. runroot above). This is faster, but doesn't persist across reboots.
# Additional garbage collection must also be performed at boot-time, so this
# option should remain disabled in most configurations.
# transient_store = true # transient_store = true
[storage.options] [storage.options]

View File

@ -506,10 +506,13 @@ type Store interface {
// GetDigestLock returns digest-specific Locker. // GetDigestLock returns digest-specific Locker.
GetDigestLock(digest.Digest) (Locker, error) GetDigestLock(digest.Digest) (Locker, error)
// LayerFromAdditionalLayerStore searches layers from the additional layer store and // LayerFromAdditionalLayerStore searches the additional layer store and returns an object
// returns the object for handling this. Note that this hasn't been stored to this store // which can create a layer with the specified digest associated with the specified image
// yet so this needs to be done through PutAs method. // reference. Note that this hasn't been stored to this store yet: the actual creation of
// Releasing AdditionalLayer handler is caller's responsibility. // a usable layer is done by calling the returned object's PutAs() method. After creating
// a layer, the caller must then call the object's Release() method to free any temporary
// resources which were allocated for the object by this method or the object's PutAs()
// method.
// This API is experimental and can be changed without bumping the major version number. // This API is experimental and can be changed without bumping the major version number.
LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error) LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error)
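The rewritten doc comment spells out a three-step lifecycle (lookup, PutAs, Release). A hedged usage sketch, assuming the AdditionalLayer interface as vendored here; the layer ID and the empty parent/names arguments are invented:

package example

import (
	"github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
)

// materializeAdditionalLayer follows the documented lifecycle: look the
// layer up in the additional layer store, create a usable layer from it
// with PutAs(), and always Release() the handle afterwards.
func materializeAdditionalLayer(s storage.Store, d digest.Digest, imageRef string) error {
	al, err := s.LookupAdditionalLayer(d, imageRef)
	if err != nil {
		return err
	}
	defer al.Release() // frees temporary resources on every return path
	_, err = al.PutAs("new-layer-id", "" /* no parent */, nil /* no names */)
	return err
}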
@ -562,6 +565,17 @@ type LayerOptions struct {
UncompressedDigest digest.Digest UncompressedDigest digest.Digest
// True if the layer info can be treated as volatile // True if the layer info can be treated as volatile
Volatile bool Volatile bool
// BigData is a set of items which should be stored with the layer.
BigData []LayerBigDataOption
// Flags is a set of named flags and their values to store with the layer.
// Currently these can only be set when the layer record is created, but that
// could change in the future.
Flags map[string]interface{}
}
type LayerBigDataOption struct {
Key string
Data io.Reader
} }
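A hedged construction example for the new LayerOptions fields; the key, payload, and flag name are invented for illustration:

package example

import (
	"strings"

	"github.com/containers/storage"
)

// newLayerOptions populates the BigData and Flags fields added above. Note
// that LayerBigDataOption carries an io.Reader, so large items can be
// streamed rather than held in memory.
func newLayerOptions() *storage.LayerOptions {
	return &storage.LayerOptions{
		BigData: []storage.LayerBigDataOption{
			{Key: "example.annotations", Data: strings.NewReader(`{"illustrative":true}`)},
		},
		Flags: map[string]interface{}{"example-flag": true},
	}
}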
// ImageOptions is used for passing options to a Store's CreateImage() method. // ImageOptions is used for passing options to a Store's CreateImage() method.
@ -571,6 +585,26 @@ type ImageOptions struct {
CreationDate time.Time CreationDate time.Time
// Digest is a hard-coded digest value that we can use to look up the image. It is optional. // Digest is a hard-coded digest value that we can use to look up the image. It is optional.
Digest digest.Digest Digest digest.Digest
// Digests is a list of digest values of the image's manifests, and
// possibly a manually-specified value, that we can use to locate the
// image. If Digest is set, its value is also in this list.
Digests []digest.Digest
// Metadata is caller-specified metadata associated with the layer.
Metadata string
// BigData is a set of items which should be stored with the image.
BigData []ImageBigDataOption
// NamesHistory is used for guessing what this image was named when a container was created based // NamesHistory is used for guessing what this image was named when a container was created based
// on it, even after the image no longer has any names. // on it, even after the image no longer has any names.
NamesHistory []string
// Flags is a set of named flags and their values to store with the image. Currently these can only
// be set when the image record is created, but that could change in the future.
Flags map[string]interface{}
}
type ImageBigDataOption struct {
Key string
Data []byte
Digest digest.Digest
} }
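The image variant differs in that ImageBigDataOption carries the bytes themselves plus their digest. A hedged sketch, with an invented key and the digest computed via go-digest:

package example

import (
	"github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
)

// newImageOptions attaches one big-data item to an image at creation time.
func newImageOptions(manifest []byte) *storage.ImageOptions {
	return &storage.ImageOptions{
		BigData: []storage.ImageBigDataOption{
			{Key: "manifest", Data: manifest, Digest: digest.FromBytes(manifest)},
		},
	}
}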
// ContainerOptions is used for passing options to a Store's CreateContainer() method. // ContainerOptions is used for passing options to a Store's CreateContainer() method.
@ -580,11 +614,23 @@ type ContainerOptions struct {
// container's layer will inherit settings from the image's top layer // container's layer will inherit settings from the image's top layer
// or, if it is not being created based on an image, the Store object. // or, if it is not being created based on an image, the Store object.
types.IDMappingOptions types.IDMappingOptions
LabelOpts []string LabelOpts []string
// Flags is a set of named flags and their values to store with the container.
// Currently these can only be set when the container record is created, but that
// could change in the future.
Flags map[string]interface{} Flags map[string]interface{}
MountOpts []string MountOpts []string
Volatile bool Volatile bool
StorageOpt map[string]string StorageOpt map[string]string
// Metadata is caller-specified metadata associated with the container.
Metadata string
// BigData is a set of items which should be stored for the container.
BigData []ContainerBigDataOption
}
type ContainerBigDataOption struct {
Key string
Data []byte
} }
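And the container variant, again hedged and with invented values; as the CreateContainer() hunk further down shows, the Metadata field is overwritten by the method's own metadata argument, so it is not set here:

package example

import "github.com/containers/storage"

// newContainerOptions populates the ContainerBigDataOption slice added above.
func newContainerOptions() *storage.ContainerOptions {
	return &storage.ContainerOptions{
		BigData: []storage.ContainerBigDataOption{
			{Key: "example.config", Data: []byte(`{"illustrative":true}`)},
		},
	}
}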
type store struct { type store struct {
@ -1221,7 +1267,7 @@ func canUseShifting(store rwLayerStore, uidmap, gidmap []idtools.IDMap) bool {
return true return true
} }
func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error) { func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) {
var parentLayer *Layer var parentLayer *Layer
rlstore, rlstores, err := s.bothLayerStoreKinds() rlstore, rlstores, err := s.bothLayerStoreKinds()
if err != nil { if err != nil {
@ -1235,8 +1281,11 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
return nil, -1, err return nil, -1, err
} }
defer s.containerStore.stopWriting() defer s.containerStore.stopWriting()
if options == nil { var options LayerOptions
options = &LayerOptions{} if lOptions != nil {
options = *lOptions
options.BigData = copyLayerBigDataOptionSlice(lOptions.BigData)
options.Flags = copyStringInterfaceMap(lOptions.Flags)
} }
if options.HostUIDMapping { if options.HostUIDMapping {
options.UIDMap = nil options.UIDMap = nil
@ -1303,7 +1352,7 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
GIDMap: copyIDMap(gidMap), GIDMap: copyIDMap(gidMap),
} }
} }
return rlstore.Put(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, nil, diff) return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff)
} }
func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) { func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) {
@ -1311,7 +1360,7 @@ func (s *store) CreateLayer(id, parent string, names []string, mountLabel string
return layer, err return layer, err
} }
func (s *store) CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) { func (s *store) CreateImage(id string, names []string, layer, metadata string, iOptions *ImageOptions) (*Image, error) {
if layer != "" { if layer != "" {
layerStores, err := s.allLayerStores() layerStores, err := s.allLayerStores()
if err != nil { if err != nil {
@ -1337,13 +1386,22 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o
var res *Image var res *Image
err := s.writeToImageStore(func() error { err := s.writeToImageStore(func() error {
creationDate := time.Now().UTC() var options ImageOptions
if options != nil && !options.CreationDate.IsZero() {
creationDate = options.CreationDate if iOptions != nil {
options = *iOptions
options.Digests = copyDigestSlice(iOptions.Digests)
options.BigData = copyImageBigDataOptionSlice(iOptions.BigData)
options.NamesHistory = copyStringSlice(iOptions.NamesHistory)
options.Flags = copyStringInterfaceMap(iOptions.Flags)
} }
if options.CreationDate.IsZero() {
options.CreationDate = time.Now().UTC()
}
options.Metadata = metadata
var err error var err error
res, err = s.imageStore.Create(id, names, layer, metadata, creationDate, options.Digest) res, err = s.imageStore.create(id, names, layer, options)
return err return err
}) })
return res, err return res, err
@ -1426,26 +1484,22 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlst
// mappings, and register it as an alternate top layer in the image. // mappings, and register it as an alternate top layer in the image.
var layerOptions LayerOptions var layerOptions LayerOptions
if canUseShifting(rlstore, options.UIDMap, options.GIDMap) { if canUseShifting(rlstore, options.UIDMap, options.GIDMap) {
layerOptions = LayerOptions{ layerOptions.IDMappingOptions = types.IDMappingOptions{
IDMappingOptions: types.IDMappingOptions{ HostUIDMapping: true,
HostUIDMapping: true, HostGIDMapping: true,
HostGIDMapping: true, UIDMap: nil,
UIDMap: nil, GIDMap: nil,
GIDMap: nil,
},
} }
} else { } else {
layerOptions = LayerOptions{ layerOptions.IDMappingOptions = types.IDMappingOptions{
IDMappingOptions: types.IDMappingOptions{ HostUIDMapping: options.HostUIDMapping,
HostUIDMapping: options.HostUIDMapping, HostGIDMapping: options.HostGIDMapping,
HostGIDMapping: options.HostGIDMapping, UIDMap: copyIDMap(options.UIDMap),
UIDMap: copyIDMap(options.UIDMap), GIDMap: copyIDMap(options.GIDMap),
GIDMap: copyIDMap(options.GIDMap),
},
} }
} }
layerOptions.TemplateLayer = layer.ID layerOptions.TemplateLayer = layer.ID
mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil) mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil)
if err != nil { if err != nil {
return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err) return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err)
} }
@ -1459,9 +1513,17 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlst
return mappedLayer, nil return mappedLayer, nil
} }
func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) { func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, cOptions *ContainerOptions) (*Container, error) {
if options == nil { var options ContainerOptions
options = &ContainerOptions{} if cOptions != nil {
options = *cOptions
options.IDMappingOptions.UIDMap = copyIDMap(cOptions.IDMappingOptions.UIDMap)
options.IDMappingOptions.GIDMap = copyIDMap(cOptions.IDMappingOptions.GIDMap)
options.LabelOpts = copyStringSlice(cOptions.LabelOpts)
options.Flags = copyStringInterfaceMap(cOptions.Flags)
options.MountOpts = copyStringSlice(cOptions.MountOpts)
options.StorageOpt = copyStringStringMap(cOptions.StorageOpt)
options.BigData = copyContainerBigDataOptionSlice(cOptions.BigData)
} }
if options.HostUIDMapping { if options.HostUIDMapping {
options.UIDMap = nil options.UIDMap = nil
@ -1469,6 +1531,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
if options.HostGIDMapping { if options.HostGIDMapping {
options.GIDMap = nil options.GIDMap = nil
} }
options.Metadata = metadata
rlstore, lstores, err := s.bothLayerStoreKinds() // lstores will be locked read-only if image != "" rlstore, lstores, err := s.bothLayerStoreKinds() // lstores will be locked read-only if image != ""
if err != nil { if err != nil {
return nil, err return nil, err
@ -1574,22 +1637,19 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
Volatile: options.Volatile || s.transientStore, Volatile: options.Volatile || s.transientStore,
} }
if canUseShifting(rlstore, uidMap, gidMap) { if canUseShifting(rlstore, uidMap, gidMap) {
layerOptions.IDMappingOptions = layerOptions.IDMappingOptions = types.IDMappingOptions{
types.IDMappingOptions{ HostUIDMapping: true,
HostUIDMapping: true, HostGIDMapping: true,
HostGIDMapping: true, UIDMap: nil,
UIDMap: nil, GIDMap: nil,
GIDMap: nil, }
}
} else { } else {
layerOptions.IDMappingOptions = layerOptions.IDMappingOptions = types.IDMappingOptions{
types.IDMappingOptions{ HostUIDMapping: idMappingsOptions.HostUIDMapping,
HostUIDMapping: idMappingsOptions.HostUIDMapping, HostGIDMapping: idMappingsOptions.HostGIDMapping,
HostGIDMapping: idMappingsOptions.HostGIDMapping, UIDMap: copyIDMap(uidMap),
UIDMap: copyIDMap(uidMap), GIDMap: copyIDMap(gidMap),
GIDMap: copyIDMap(gidMap), }
}
} }
if options.Flags == nil { if options.Flags == nil {
options.Flags = make(map[string]interface{}) options.Flags = make(map[string]interface{})
@ -1610,7 +1670,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
options.Flags[mountLabelFlag] = mountLabel options.Flags[mountLabelFlag] = mountLabel
} }
clayer, err := rlstore.Create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true) clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1630,7 +1690,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
GIDMap: copyIDMap(options.GIDMap), GIDMap: copyIDMap(options.GIDMap),
} }
var err error var err error
container, err = s.containerStore.Create(id, names, imageID, layer, metadata, options) container, err = s.containerStore.create(id, names, imageID, layer, &options)
if err != nil || container == nil { if err != nil || container == nil {
if err2 := rlstore.Delete(layer); err2 != nil { if err2 := rlstore.Delete(layer); err2 != nil {
if err == nil { if err == nil {
@ -2117,7 +2177,8 @@ func (s *store) updateNames(id string, names []string, op updateNameOperation) e
return s.imageStore.updateNames(id, deduped, op) return s.imageStore.updateNames(id, deduped, op)
} }
// Check is id refers to a RO Store // Check if the id refers to a read-only image store -- we want to allow images in
// read-only stores to have their names changed.
for _, is := range s.roImageStores { for _, is := range s.roImageStores {
store := is store := is
if err := store.startReading(); err != nil { if err := store.startReading(); err != nil {
@ -2125,12 +2186,35 @@ func (s *store) updateNames(id string, names []string, op updateNameOperation) e
} }
defer store.stopReading() defer store.stopReading()
if i, err := store.Get(id); err == nil { if i, err := store.Get(id); err == nil {
if len(deduped) > 1 { // "pull up" the image so that we can change its names list
// Do not want to create image name in R/W storage options := ImageOptions{
deduped = deduped[1:] Metadata: i.Metadata,
CreationDate: i.Created,
Digest: i.Digest,
Digests: copyDigestSlice(i.Digests),
NamesHistory: copyStringSlice(i.NamesHistory),
} }
_, err := s.imageStore.Create(id, deduped, i.TopLayer, i.Metadata, i.Created, i.Digest) for _, key := range i.BigDataNames {
return err data, err := store.BigData(id, key)
if err != nil {
return err
}
dataDigest, err := store.BigDataDigest(id, key)
if err != nil {
return err
}
options.BigData = append(options.BigData, ImageBigDataOption{
Key: key,
Data: data,
Digest: dataDigest,
})
}
_, err = s.imageStore.create(id, i.Names, i.TopLayer, options)
if err != nil {
return err
}
// now make the changes to the writeable image record's names list
return s.imageStore.updateNames(id, deduped, op)
} }
} }
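The practical effect of the "pull up" logic, as a hedged sketch: name updates on an image that only exists in a read-only additional store now succeed, because its record is first duplicated into the read-write store. AddNames is assumed to be the public Store entry point that funnels into updateNames; the name is invented.

package example

import "github.com/containers/storage"

// renameReadOnlyImage adds a name to an image that may live only in a
// read-only store; with the change above this no longer fails.
func renameReadOnlyImage(s storage.Store, imageID string) error {
	return s.AddNames(imageID, []string{"example.com/renamed:latest"})
}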
@ -2962,6 +3046,16 @@ func (s *store) Image(id string) (*Image, error) {
if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) {
image, err := store.Get(id) image, err := store.Get(id)
if err == nil { if err == nil {
if store != s.imageStore {
// found it in a read-only store - readAllImageStores() still has the writeable store locked for reading
if _, localErr := s.imageStore.Get(image.ID); localErr == nil {
// if the lookup key was a name, and we found the image in a read-only
// store, but we have an entry with the same ID in the read-write store,
// then the name was removed when we duplicated the image's
// record into writable storage, so we should ignore this entry
return false, nil
}
}
res = image res = image
return true, nil return true, nil
} }
@ -3247,6 +3341,14 @@ func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest {
return ret return ret
} }
func copyStringStringMap(m map[string]string) map[string]string {
ret := make(map[string]string, len(m))
for k, v := range m {
ret[k] = v
}
return ret
}
func copyDigestSlice(slice []digest.Digest) []digest.Digest { func copyDigestSlice(slice []digest.Digest) []digest.Digest {
if len(slice) == 0 { if len(slice) == 0 {
return nil return nil
@ -3266,6 +3368,31 @@ func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
return ret return ret
} }
func copyLayerBigDataOptionSlice(slice []LayerBigDataOption) []LayerBigDataOption {
ret := make([]LayerBigDataOption, len(slice))
copy(ret, slice)
return ret
}
func copyImageBigDataOptionSlice(slice []ImageBigDataOption) []ImageBigDataOption {
ret := make([]ImageBigDataOption, len(slice))
for i := range slice {
ret[i].Key = slice[i].Key
ret[i].Data = append([]byte{}, slice[i].Data...)
ret[i].Digest = slice[i].Digest
}
return ret
}
func copyContainerBigDataOptionSlice(slice []ContainerBigDataOption) []ContainerBigDataOption {
ret := make([]ContainerBigDataOption, len(slice))
for i := range slice {
ret[i].Key = slice[i].Key
ret[i].Data = append([]byte{}, slice[i].Data...)
}
return ret
}
// AutoUserNsMinSize is the minimum size for automatically created user namespaces // AutoUserNsMinSize is the minimum size for automatically created user namespaces
const AutoUserNsMinSize = 1024 const AutoUserNsMinSize = 1024

View File

@ -175,7 +175,7 @@ outer:
// We need to create a temporary layer so we can mount it and look up the // We need to create a temporary layer so we can mount it and look up the
// maximum IDs used. // maximum IDs used.
clayer, err := rlstore.Create("", topLayer, nil, "", nil, layerOptions, false) clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil)
if err != nil { if err != nil {
return 0, err return 0, err
} }

View File

@ -2,7 +2,6 @@ package storage
import ( import (
"fmt" "fmt"
"unicode"
"github.com/containers/storage/types" "github.com/containers/storage/types"
) )
@ -73,15 +72,3 @@ func applyNameOperation(oldNames []string, opParameters []string, op updateNameO
} }
return dedupeNames(result), nil return dedupeNames(result), nil
} }
func nameLooksLikeID(name string) bool {
if len(name) != 64 {
return false
}
for _, c := range name {
if !unicode.Is(unicode.ASCII_Hex_Digit, c) {
return false
}
}
return true
}

View File

@ -386,8 +386,14 @@ func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error
} }
func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
if fd.Cardinality() == protoreflect.Repeated {
return false
}
if md := fd.Message(); md != nil { if md := fd.Message(); md != nil {
return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated return md.FullName() == "google.protobuf.Value"
}
if ed := fd.Enum(); ed != nil {
return ed.FullName() == "google.protobuf.NullValue"
} }
return false return false
} }

View File

@ -18,16 +18,40 @@ import (
"crypto/x509/pkix" "crypto/x509/pkix"
"encoding/asn1" "encoding/asn1"
"errors" "errors"
"fmt"
) )
var ( var (
OIDIssuer = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 1} // Deprecated: Use OIDIssuerV2
OIDGitHubWorkflowTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 2} OIDIssuer = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 1}
OIDGitHubWorkflowSHA = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 3} // Deprecated: Use OIDBuildTrigger
OIDGitHubWorkflowName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 4} OIDGitHubWorkflowTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 2}
// Deprecated: Use OIDSourceRepositoryDigest
OIDGitHubWorkflowSHA = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 3}
// Deprecated: Use OIDBuildConfigURI or OIDBuildConfigDigest
OIDGitHubWorkflowName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 4}
// Deprecated: Use SourceRepositoryURI
OIDGitHubWorkflowRepository = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 5} OIDGitHubWorkflowRepository = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 5}
OIDGitHubWorkflowRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 6} // Deprecated: Use OIDSourceRepositoryRef
OIDOtherName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 7} OIDGitHubWorkflowRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 6}
OIDOtherName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 7}
OIDIssuerV2 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 8}
// CI extensions
OIDBuildSignerURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 9}
OIDBuildSignerDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 10}
OIDRunnerEnvironment = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 11}
OIDSourceRepositoryURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 12}
OIDSourceRepositoryDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 13}
OIDSourceRepositoryRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 14}
OIDSourceRepositoryIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 15}
OIDSourceRepositoryOwnerURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 16}
OIDSourceRepositoryOwnerIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 17}
OIDBuildConfigURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 18}
OIDBuildConfigDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 19}
OIDBuildTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 20}
OIDRunInvocationURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 21}
) )
// Extensions contains all custom x509 extensions defined by Fulcio // Extensions contains all custom x509 extensions defined by Fulcio
@ -39,33 +63,79 @@ type Extensions struct {
// a federated login like Dex, it should match the issuer URL of the // a federated login like Dex, it should match the issuer URL of the
// upstream issuer. If the issuer is not set, the extensions are invalid and // upstream issuer. If the issuer is not set, the extensions are invalid and
// will fail to render. // will fail to render.
Issuer string // OID 1.3.6.1.4.1.57264.1.1 Issuer string // OID 1.3.6.1.4.1.57264.1.8 and 1.3.6.1.4.1.57264.1.1 (Deprecated)
// Deprecated
// Triggering event of the Github Workflow. Matches the `event_name` claim of ID // Triggering event of the Github Workflow. Matches the `event_name` claim of ID
// tokens from Github Actions // tokens from Github Actions
GithubWorkflowTrigger string // OID 1.3.6.1.4.1.57264.1.2 GithubWorkflowTrigger string // OID 1.3.6.1.4.1.57264.1.2
// Deprecated
// SHA of git commit being built in Github Actions. Matches the `sha` claim of ID // SHA of git commit being built in Github Actions. Matches the `sha` claim of ID
// tokens from Github Actions // tokens from Github Actions
GithubWorkflowSHA string // OID 1.3.6.1.4.1.57264.1.3 GithubWorkflowSHA string // OID 1.3.6.1.4.1.57264.1.3
// Deprecated
// Name of Github Actions Workflow. Matches the `workflow` claim of the ID // Name of Github Actions Workflow. Matches the `workflow` claim of the ID
// tokens from Github Actions // tokens from Github Actions
GithubWorkflowName string // OID 1.3.6.1.4.1.57264.1.4 GithubWorkflowName string // OID 1.3.6.1.4.1.57264.1.4
// Deprecated
// Repository of the Github Actions Workflow. Matches the `repository` claim of the ID // Repository of the Github Actions Workflow. Matches the `repository` claim of the ID
// tokens from Github Actions // tokens from Github Actions
GithubWorkflowRepository string // OID 1.3.6.1.4.1.57264.1.5 GithubWorkflowRepository string // OID 1.3.6.1.4.1.57264.1.5
// Deprecated
// Git Ref of the Github Actions Workflow. Matches the `ref` claim of the ID tokens // Git Ref of the Github Actions Workflow. Matches the `ref` claim of the ID tokens
// from Github Actions // from Github Actions
GithubWorkflowRef string // 1.3.6.1.4.1.57264.1.6 GithubWorkflowRef string // 1.3.6.1.4.1.57264.1.6
// Reference to specific build instructions that are responsible for signing.
BuildSignerURI string // 1.3.6.1.4.1.57264.1.9
// Immutable reference to the specific version of the build instructions that is responsible for signing.
BuildSignerDigest string // 1.3.6.1.4.1.57264.1.10
// Specifies whether the build took place in platform-hosted cloud infrastructure or customer/self-hosted infrastructure.
RunnerEnvironment string // 1.3.6.1.4.1.57264.1.11
// Source repository URL that the build was based on.
SourceRepositoryURI string // 1.3.6.1.4.1.57264.1.12
// Immutable reference to a specific version of the source code that the build was based upon.
SourceRepositoryDigest string // 1.3.6.1.4.1.57264.1.13
// Source Repository Ref that the build run was based upon.
SourceRepositoryRef string // 1.3.6.1.4.1.57264.1.14
// Immutable identifier for the source repository the workflow was based upon.
SourceRepositoryIdentifier string // 1.3.6.1.4.1.57264.1.15
// URL of the owner of the source repository that the build was based on.
SourceRepositoryOwnerURI string // 1.3.6.1.4.1.57264.1.16
// Immutable identifier for the owner of the source repository that the workflow was based upon.
SourceRepositoryOwnerIdentifier string // 1.3.6.1.4.1.57264.1.17
// Build Config URL to the top-level/initiating build instructions.
BuildConfigURI string // 1.3.6.1.4.1.57264.1.18
// Immutable reference to the specific version of the top-level/initiating build instructions.
BuildConfigDigest string // 1.3.6.1.4.1.57264.1.19
// Event or action that initiated the build.
BuildTrigger string // 1.3.6.1.4.1.57264.1.20
// Run Invocation URL to uniquely identify the build execution.
RunInvocationURI string // 1.3.6.1.4.1.57264.1.21
} }
func (e Extensions) Render() ([]pkix.Extension, error) { func (e Extensions) Render() ([]pkix.Extension, error) {
var exts []pkix.Extension var exts []pkix.Extension
// BEGIN: Deprecated
if e.Issuer != "" { if e.Issuer != "" {
// deprecated issuer extension due to incorrect encoding
exts = append(exts, pkix.Extension{ exts = append(exts, pkix.Extension{
Id: OIDIssuer, Id: OIDIssuer,
Value: []byte(e.Issuer), Value: []byte(e.Issuer),
@ -103,14 +173,163 @@ func (e Extensions) Render() ([]pkix.Extension, error) {
Value: []byte(e.GithubWorkflowRef), Value: []byte(e.GithubWorkflowRef),
}) })
} }
// END: Deprecated
// duplicate issuer with correct RFC 5280 encoding
if e.Issuer != "" {
// construct DER encoding of issuer string
val, err := asn1.MarshalWithParams(e.Issuer, "utf8")
if err != nil {
return nil, err
}
exts = append(exts, pkix.Extension{
Id: OIDIssuerV2,
Value: val,
})
} else {
return nil, errors.New("extensions must have a non-empty issuer url")
}
if e.BuildSignerURI != "" {
val, err := asn1.MarshalWithParams(e.BuildSignerURI, "utf8")
if err != nil {
return nil, err
}
exts = append(exts, pkix.Extension{
Id: OIDBuildSignerURI,
Value: val,
})
}
if e.BuildSignerDigest != "" {
val, err := asn1.MarshalWithParams(e.BuildSignerDigest, "utf8")
if err != nil {
return nil, err
}
exts = append(exts, pkix.Extension{
Id: OIDBuildSignerDigest,
Value: val,
})
}
if e.RunnerEnvironment != "" {
val, err := asn1.MarshalWithParams(e.RunnerEnvironment, "utf8")
if err != nil {
return nil, err
}
exts = append(exts, pkix.Extension{
Id: OIDRunnerEnvironment,
Value: val,
})
}
if e.SourceRepositoryURI != "" {
val, err := asn1.MarshalWithParams(e.SourceRepositoryURI, "utf8")
if err != nil {
return nil, err
}
exts = append(exts, pkix.Extension{
Id: OIDSourceRepositoryURI,
Value: val,
})
}
if e.SourceRepositoryDigest != "" {
val, err := asn1.MarshalWithParams(e.SourceRepositoryDigest, "utf8")
if err != nil {
return nil, err
}
exts = append(exts, pkix.Extension{
Id: OIDSourceRepositoryDigest,
Value: val,
})
}
if e.SourceRepositoryRef != "" {
val, err := asn1.MarshalWithParams(e.SourceRepositoryRef, "utf8")
if err != nil {
return nil, err
}
exts = append(exts, pkix.Extension{
Id: OIDSourceRepositoryRef,
Value: val,
})
}
if e.SourceRepositoryIdentifier != "" {
val, err := asn1.MarshalWithParams(e.SourceRepositoryIdentifier, "utf8")
if err != nil {
return nil, err
}
exts = append(exts, pkix.Extension{
Id: OIDSourceRepositoryIdentifier,
Value: val,
})
}
if e.SourceRepositoryOwnerURI != "" {
val, err := asn1.MarshalWithParams(e.SourceRepositoryOwnerURI, "utf8")
if err != nil {
return nil, err
}
exts = append(exts, pkix.Extension{
Id: OIDSourceRepositoryOwnerURI,
Value: val,
})
}
if e.SourceRepositoryOwnerIdentifier != "" {
val, err := asn1.MarshalWithParams(e.SourceRepositoryOwnerIdentifier, "utf8")
if err != nil {
return nil, err
}
exts = append(exts, pkix.Extension{
Id: OIDSourceRepositoryOwnerIdentifier,
Value: val,
})
}
if e.BuildConfigURI != "" {
val, err := asn1.MarshalWithParams(e.BuildConfigURI, "utf8")
if err != nil {
return nil, err
}
exts = append(exts, pkix.Extension{
Id: OIDBuildConfigURI,
Value: val,
})
}
if e.BuildConfigDigest != "" {
val, err := asn1.MarshalWithParams(e.BuildConfigDigest, "utf8")
if err != nil {
return nil, err
}
exts = append(exts, pkix.Extension{
Id: OIDBuildConfigDigest,
Value: val,
})
}
if e.BuildTrigger != "" {
val, err := asn1.MarshalWithParams(e.BuildTrigger, "utf8")
if err != nil {
return nil, err
}
exts = append(exts, pkix.Extension{
Id: OIDBuildTrigger,
Value: val,
})
}
if e.RunInvocationURI != "" {
val, err := asn1.MarshalWithParams(e.RunInvocationURI, "utf8")
if err != nil {
return nil, err
}
exts = append(exts, pkix.Extension{
Id: OIDRunInvocationURI,
Value: val,
})
}
return exts, nil return exts, nil
} }
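Each new branch of Render() repeats the same three steps. A hedged helper sketch that captures the pattern (this function is not part of the upstream package):

package example

import (
	"crypto/x509/pkix"
	"encoding/asn1"
)

// utf8Extension DER-encodes value as an ASN.1 UTF8String, per RFC 5280,
// and wraps it in a pkix.Extension under the given OID.
func utf8Extension(oid asn1.ObjectIdentifier, value string) (pkix.Extension, error) {
	val, err := asn1.MarshalWithParams(value, "utf8")
	if err != nil {
		return pkix.Extension{}, err
	}
	return pkix.Extension{Id: oid, Value: val}, nil
}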
func ParseExtensions(ext []pkix.Extension) (Extensions, error) { func parseExtensions(ext []pkix.Extension) (Extensions, error) {
out := Extensions{} out := Extensions{}
for _, e := range ext { for _, e := range ext {
switch { switch {
// BEGIN: Deprecated
case e.Id.Equal(OIDIssuer): case e.Id.Equal(OIDIssuer):
out.Issuer = string(e.Value) out.Issuer = string(e.Value)
case e.Id.Equal(OIDGitHubWorkflowTrigger): case e.Id.Equal(OIDGitHubWorkflowTrigger):
@ -123,6 +342,63 @@ func ParseExtensions(ext []pkix.Extension) (Extensions, error) {
out.GithubWorkflowRepository = string(e.Value) out.GithubWorkflowRepository = string(e.Value)
case e.Id.Equal(OIDGitHubWorkflowRef): case e.Id.Equal(OIDGitHubWorkflowRef):
out.GithubWorkflowRef = string(e.Value) out.GithubWorkflowRef = string(e.Value)
// END: Deprecated
case e.Id.Equal(OIDIssuerV2):
if err := parseDERString(e.Value, &out.Issuer); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDBuildSignerURI):
if err := parseDERString(e.Value, &out.BuildSignerURI); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDBuildSignerDigest):
if err := parseDERString(e.Value, &out.BuildSignerDigest); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDRunnerEnvironment):
if err := parseDERString(e.Value, &out.RunnerEnvironment); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDSourceRepositoryURI):
if err := parseDERString(e.Value, &out.SourceRepositoryURI); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDSourceRepositoryDigest):
if err := parseDERString(e.Value, &out.SourceRepositoryDigest); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDSourceRepositoryRef):
if err := parseDERString(e.Value, &out.SourceRepositoryRef); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDSourceRepositoryIdentifier):
if err := parseDERString(e.Value, &out.SourceRepositoryIdentifier); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDSourceRepositoryOwnerURI):
if err := parseDERString(e.Value, &out.SourceRepositoryOwnerURI); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDSourceRepositoryOwnerIdentifier):
if err := parseDERString(e.Value, &out.SourceRepositoryOwnerIdentifier); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDBuildConfigURI):
if err := parseDERString(e.Value, &out.BuildConfigURI); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDBuildConfigDigest):
if err := parseDERString(e.Value, &out.BuildConfigDigest); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDBuildTrigger):
if err := parseDERString(e.Value, &out.BuildTrigger); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDRunInvocationURI):
if err := parseDERString(e.Value, &out.RunInvocationURI); err != nil {
return Extensions{}, err
}
} }
} }
@ -130,3 +406,16 @@ func ParseExtensions(ext []pkix.Extension) (Extensions, error) {
// more complex parsing of fields in a backwards compatible way if needed. // more complex parsing of fields in a backwards compatible way if needed.
return out, nil return out, nil
} }
// parseDERString decodes a DER-encoded string and puts the value in parsedVal.
// Returns an error if the unmarshalling fails or if there are trailing bytes in the encoding.
func parseDERString(val []byte, parsedVal *string) error {
rest, err := asn1.Unmarshal(val, parsedVal)
if err != nil {
return fmt.Errorf("unexpected error unmarshalling DER-encoded string: %v", err)
}
if len(rest) != 0 {
return errors.New("unexpected trailing bytes in DER-encoded string")
}
return nil
}
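A hedged round-trip check: values written with asn1.MarshalWithParams(..., "utf8") on the Render() side decode cleanly with parseDERString. The helper is restated below because upstream keeps it unexported, and the issuer URL is only an example value.

package main

import (
	"encoding/asn1"
	"errors"
	"fmt"
)

// parseDERString mirrors the unexported upstream helper shown above.
func parseDERString(val []byte, parsedVal *string) error {
	rest, err := asn1.Unmarshal(val, parsedVal)
	if err != nil {
		return fmt.Errorf("unexpected error unmarshalling DER-encoded string: %v", err)
	}
	if len(rest) != 0 {
		return errors.New("unexpected trailing bytes in DER-encoded string")
	}
	return nil
}

func main() {
	der, err := asn1.MarshalWithParams("https://token.actions.githubusercontent.com", "utf8")
	if err != nil {
		panic(err)
	}
	var issuer string
	if err := parseDERString(der, &issuer); err != nil {
		panic(err)
	}
	fmt.Println(issuer) // prints the round-tripped issuer URL
}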

18
vendor/modules.txt vendored
View File

@ -10,7 +10,7 @@ github.com/Microsoft/go-winio/internal/socket
github.com/Microsoft/go-winio/pkg/guid github.com/Microsoft/go-winio/pkg/guid
github.com/Microsoft/go-winio/pkg/security github.com/Microsoft/go-winio/pkg/security
github.com/Microsoft/go-winio/vhd github.com/Microsoft/go-winio/vhd
# github.com/Microsoft/hcsshim v0.9.7 # github.com/Microsoft/hcsshim v0.9.8
## explicit; go 1.13 ## explicit; go 1.13
github.com/Microsoft/hcsshim github.com/Microsoft/hcsshim
github.com/Microsoft/hcsshim/computestorage github.com/Microsoft/hcsshim/computestorage
@ -46,7 +46,7 @@ github.com/asaskevich/govalidator
# github.com/containerd/cgroups v1.0.4 # github.com/containerd/cgroups v1.0.4
## explicit; go 1.17 ## explicit; go 1.17
github.com/containerd/cgroups/stats/v1 github.com/containerd/cgroups/stats/v1
# github.com/containerd/stargz-snapshotter/estargz v0.14.1 # github.com/containerd/stargz-snapshotter/estargz v0.14.3
## explicit; go 1.19 ## explicit; go 1.19
github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz
github.com/containerd/stargz-snapshotter/estargz/errorutil github.com/containerd/stargz-snapshotter/estargz/errorutil
@ -59,7 +59,7 @@ github.com/containers/common/pkg/flag
github.com/containers/common/pkg/report github.com/containers/common/pkg/report
github.com/containers/common/pkg/report/camelcase github.com/containers/common/pkg/report/camelcase
github.com/containers/common/pkg/retry github.com/containers/common/pkg/retry
# github.com/containers/image/v5 v5.24.3-0.20230401101358-e3437f272920 # github.com/containers/image/v5 v5.25.0
## explicit; go 1.18 ## explicit; go 1.18
github.com/containers/image/v5/copy github.com/containers/image/v5/copy
github.com/containers/image/v5/directory github.com/containers/image/v5/directory
@ -150,8 +150,8 @@ github.com/containers/ocicrypt/keywrap/pkcs7
github.com/containers/ocicrypt/spec github.com/containers/ocicrypt/spec
github.com/containers/ocicrypt/utils github.com/containers/ocicrypt/utils
github.com/containers/ocicrypt/utils/keyprovider github.com/containers/ocicrypt/utils/keyprovider
# github.com/containers/storage v1.45.4 # github.com/containers/storage v1.46.0
## explicit; go 1.17 ## explicit; go 1.18
github.com/containers/storage github.com/containers/storage
github.com/containers/storage/drivers github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs github.com/containers/storage/drivers/aufs
@ -326,7 +326,7 @@ github.com/gogo/protobuf/protoc-gen-gogo/descriptor
# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
## explicit ## explicit
github.com/golang/groupcache/lru github.com/golang/groupcache/lru
# github.com/golang/protobuf v1.5.2 # github.com/golang/protobuf v1.5.3
## explicit; go 1.9 ## explicit; go 1.9
github.com/golang/protobuf/jsonpb github.com/golang/protobuf/jsonpb
github.com/golang/protobuf/proto github.com/golang/protobuf/proto
@ -442,7 +442,7 @@ github.com/opencontainers/image-spec/specs-go/v1
# github.com/opencontainers/image-tools v1.0.0-rc3 # github.com/opencontainers/image-tools v1.0.0-rc3
## explicit ## explicit
github.com/opencontainers/image-tools/image github.com/opencontainers/image-tools/image
# github.com/opencontainers/runc v1.1.4 # github.com/opencontainers/runc v1.1.5
## explicit; go 1.16 ## explicit; go 1.16
github.com/opencontainers/runc/libcontainer/user github.com/opencontainers/runc/libcontainer/user
# github.com/opencontainers/runtime-spec v1.1.0-rc.1 # github.com/opencontainers/runtime-spec v1.1.0-rc.1
@ -475,12 +475,14 @@ github.com/proglottis/gpgme
# github.com/rivo/uniseg v0.4.4 # github.com/rivo/uniseg v0.4.4
## explicit; go 1.18 ## explicit; go 1.18
github.com/rivo/uniseg github.com/rivo/uniseg
# github.com/rogpeppe/go-internal v1.10.0
## explicit; go 1.19
# github.com/russross/blackfriday v2.0.0+incompatible # github.com/russross/blackfriday v2.0.0+incompatible
## explicit ## explicit
# github.com/segmentio/ksuid v1.0.4 # github.com/segmentio/ksuid v1.0.4
## explicit; go 1.12 ## explicit; go 1.12
github.com/segmentio/ksuid github.com/segmentio/ksuid
# github.com/sigstore/fulcio v1.1.0 # github.com/sigstore/fulcio v1.2.0
## explicit; go 1.20 ## explicit; go 1.20
github.com/sigstore/fulcio/pkg/api github.com/sigstore/fulcio/pkg/api
github.com/sigstore/fulcio/pkg/certificate github.com/sigstore/fulcio/pkg/certificate