Merge pull request #1680 from mtrmac/test-registry-2.8.1-with-cosign-signatures

Add OCI artifact support, test syncing Cosign signatures again
Valentin Rothberg 2022-07-04 09:16:23 +02:00 committed by GitHub
commit b95e081162
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
123 changed files with 2777 additions and 2117 deletions
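For context on what the re-enabled tests exercise: Cosign publishes an image's signature as a separate artifact in the same repository, stored under a tag derived from the signed manifest's digest, so syncing signatures amounts to copying those extra artifacts alongside the image. A minimal Go sketch of that tag-naming convention (the helper name and the sample digest are illustrative, not taken from this PR):

package main

import (
	"fmt"
	"strings"
)

// cosignSignatureTag maps a manifest digest such as "sha256:abc..." to the tag
// "sha256-abc....sig" under which Cosign stores the matching signature artifact
// (Cosign's tag-based discovery convention). Illustrative only; this helper is
// not part of the PR.
func cosignSignatureTag(manifestDigest string) string {
	return strings.Replace(manifestDigest, ":", "-", 1) + ".sig"
}

func main() {
	d := "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" // sample digest
	fmt.Println(cosignSignatureTag(d)) // prints the digest with ':' replaced by '-' plus a ".sig" suffix
}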

go.mod (12 changes)

@@ -4,7 +4,7 @@ go 1.17
 require (
 	github.com/containers/common v0.48.0
-	github.com/containers/image/v5 v5.21.2-0.20220519193817-1e26896b8059
+	github.com/containers/image/v5 v5.21.2-0.20220624171159-a332352e8a29
 	github.com/containers/ocicrypt v1.1.5
 	github.com/containers/storage v1.41.0
 	github.com/docker/docker v20.10.17+incompatible
@@ -48,10 +48,10 @@ require (
 	github.com/gorilla/mux v1.7.4 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
-	github.com/imdario/mergo v0.3.12 // indirect
+	github.com/imdario/mergo v0.3.13 // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.15.4 // indirect
+	github.com/klauspost/compress v1.15.6 // indirect
 	github.com/klauspost/pgzip v1.2.5 // indirect
 	github.com/kr/pretty v0.2.1 // indirect
 	github.com/kr/text v0.2.0 // indirect
@@ -77,11 +77,11 @@ require (
 	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/russross/blackfriday v2.0.0+incompatible // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
-	github.com/sylabs/sif/v2 v2.7.0 // indirect
+	github.com/sylabs/sif/v2 v2.7.1 // indirect
 	github.com/tchap/go-patricia v2.3.0+incompatible // indirect
 	github.com/ulikunitz/xz v0.5.10 // indirect
 	github.com/vbatts/tar-split v0.11.2 // indirect
-	github.com/vbauerster/mpb/v7 v7.4.1 // indirect
+	github.com/vbauerster/mpb/v7 v7.4.2 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
@@ -91,7 +91,7 @@ require (
 	golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 // indirect
 	golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
-	golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 // indirect
+	golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
 	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
 	golang.org/x/text v0.3.7 // indirect
 	google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8 // indirect

go.sum (22 changes)

@@ -93,6 +93,7 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
+github.com/ProtonMail/go-crypto v0.0.0-20220517143526-88bb52951d5b/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
 github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
@@ -273,8 +274,8 @@ github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19
 github.com/containers/common v0.48.0 h1:997nnXBZ+eNpfSM7L4SxhhZubQrfEyw3jRyNMTSsNlw=
 github.com/containers/common v0.48.0/go.mod h1:zPLZCfLXfnd1jI0QRsD4By54fP4k1+ifQs+tulIe3o0=
 github.com/containers/image/v5 v5.21.1/go.mod h1:zl35egpcDQa79IEXIuoUe1bW+D1pdxRxYjNlyb3YiXw=
-github.com/containers/image/v5 v5.21.2-0.20220519193817-1e26896b8059 h1:/FzsjrQ2nJtMom9IXEGieORlwUk/NyDuuz5SWcNo324=
-github.com/containers/image/v5 v5.21.2-0.20220519193817-1e26896b8059/go.mod h1:KntCBNQn3qOuZmQuJ38ORyTozmWXiuo05Vef2S0Sm5M=
+github.com/containers/image/v5 v5.21.2-0.20220624171159-a332352e8a29 h1:fr0KoyCzB7RzDU2ZPoEz0cYZpOrgAyg/J4fj40toAfE=
+github.com/containers/image/v5 v5.21.2-0.20220624171159-a332352e8a29/go.mod h1:m8S/6BIrULuOFt38EPXazKMn0rI+HBES0hIcSb5gL3M=
 github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
 github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -337,7 +338,6 @@ github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6
 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.14+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.16+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
 github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
@@ -573,8 +573,9 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
 github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
 github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
 github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
+github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
 github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
@@ -608,8 +609,9 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs
 github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.15.4 h1:1kn4/7MepF/CHmYub99/nNX8az0IJjfSOU/jbnTVfqQ=
 github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY=
+github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -902,11 +904,13 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/sylabs/sif/v2 v2.7.0 h1:VFzN8alnJ/3n1JA0K9DyUtfSzezWgWrzLDcYGhgBskk=
 github.com/sylabs/sif/v2 v2.7.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ=
+github.com/sylabs/sif/v2 v2.7.1 h1:XXt9AP39sQfsMCGOGQ/XP9H47yqZOvAonalkaCaNIYM=
+github.com/sylabs/sif/v2 v2.7.1/go.mod h1:bBse2nEFd3yHkmq6KmAOFEWQg5LdFYiQUdVcgamxlc8=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
@@ -929,8 +933,9 @@ github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
 github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
 github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
-github.com/vbauerster/mpb/v7 v7.4.1 h1:NhLMWQ3gNg2KJR8oeA9lO8Xvq+eNPmixDmB6JEQOUdA=
 github.com/vbauerster/mpb/v7 v7.4.1/go.mod h1:Ygg2mV9Vj9sQBWqsK2m2pidcf9H3s6bNKtqd3/M4gBo=
+github.com/vbauerster/mpb/v7 v7.4.2 h1:n917F4d8EWdUKc9c81wFkksyG6P6Mg7IETfKCE1Xqng=
+github.com/vbauerster/mpb/v7 v7.4.2/go.mod h1:UmOiIUI8aPqWXIps0ciik3RKMdzx7+ooQpq+fBcXwBA=
 github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
 github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
 github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
@@ -1253,8 +1258,9 @@ golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc=
 golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=


@@ -22,7 +22,7 @@ const (
 	// A repository with a path with multiple components in it which
 	// contains multiple tags, preferably with some tags pointing to
 	// manifest lists, and with some tags that don't.
-	pullableRepo = "quay.io/libpod/testimage"
+	pullableRepo = "k8s.gcr.io/coredns/coredns"
 	// A tagged image in the repository that we can inspect and copy.
 	pullableTaggedImage = "k8s.gcr.io/coredns/coredns:v1.6.6"
 	// A tagged manifest list in the repository that we can inspect and copy.

vendor/github.com/containers/image/v5/copy/blob.go (new file, generated, vendored, 170 lines)

@@ -0,0 +1,170 @@
package copy

import (
"context"
"io"

"github.com/containers/image/v5/internal/private"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcReader to dest,
// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
// perhaps (de/re/)compressing it if canModifyBlob,
// and returns a complete blobInfo of the copied blob.
func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Reader, srcInfo types.BlobInfo,
getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer,
isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
// The copying happens through a pipeline of connected io.Readers;
// that pipeline is built by updating stream.
// === Input: srcReader
stream := sourceStream{
reader: srcReader,
info: srcInfo,
}
// === Process input through digestingReader to validate against the expected digest.
// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
// use a separate validation failure indicator.
// Note that for this check we don't use the stronger "validationSucceeded" indicator, because
// dest.PutBlob may detect that the layer already exists, in which case we don't
// read stream to the end, and validation does not happen.
digestingReader, err := newDigestingReader(stream.reader, srcInfo.Digest)
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "preparing to verify blob %s", srcInfo.Digest)
}
stream.reader = digestingReader
// === Update progress bars
stream.reader = bar.ProxyReader(stream.reader)
// === Decrypt the stream, if required.
decryptionStep, err := ic.c.blobPipelineDecryptionStep(&stream, srcInfo)
if err != nil {
return types.BlobInfo{}, err
}
// === Detect compression of the input stream.
// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
detectedCompression, err := blobPipelineDetectCompressionStep(&stream, srcInfo)
if err != nil {
return types.BlobInfo{}, err
}
// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
if getOriginalLayerCopyWriter != nil {
stream.reader = io.TeeReader(stream.reader, getOriginalLayerCopyWriter(detectedCompression.decompressor))
originalLayerReader = stream.reader
}
// WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
// short-circuit conditions
canModifyBlob := !isConfig && ic.cannotModifyManifestReason == ""
// === Deal with layer compression/decompression if necessary
compressionStep, err := ic.blobPipelineCompressionStep(&stream, canModifyBlob, srcInfo, detectedCompression)
if err != nil {
return types.BlobInfo{}, err
}
defer compressionStep.close()
// === Encrypt the stream for valid mediatypes if ociEncryptConfig provided
if decryptionStep.decrypting && toEncrypt {
// If nothing else, we can only set uploadedInfo.CryptoOperation to a single value.
// Before relaxing this, see the original pull request's review if there are other reasons to reject this.
return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy")
}
encryptionStep, err := ic.c.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep)
if err != nil {
return types.BlobInfo{}, err
}
// === Report progress using the ic.c.progress channel, if required.
if ic.c.progress != nil && ic.c.progressInterval > 0 {
progressReader := newProgressReader(
stream.reader,
ic.c.progress,
ic.c.progressInterval,
srcInfo,
)
defer progressReader.reportDone()
stream.reader = progressReader
}
// === Finally, send the layer stream to dest.
options := private.PutBlobOptions{
Cache: ic.c.blobInfoCache,
IsConfig: isConfig,
EmptyLayer: emptyLayer,
}
if !isConfig {
options.LayerIndex = &layerIndex
}
uploadedInfo, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "writing blob")
}
uploadedInfo.Annotations = stream.info.Annotations
compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations)
decryptionStep.updateCryptoOperation(&uploadedInfo.CryptoOperation)
if err := encryptionStep.updateCryptoOperationAndAnnotations(&uploadedInfo.CryptoOperation, &uploadedInfo.Annotations); err != nil {
return types.BlobInfo{}, err
}
// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
// all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
// So, read everything from originalLayerReader, which will cause the rest to be
// sent there if we are not already at EOF.
if getOriginalLayerCopyWriter != nil {
logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
_, err := io.Copy(io.Discard, originalLayerReader)
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest)
}
}
if digestingReader.validationFailed { // Coverage: This should never happen.
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
}
if stream.info.Digest != "" && uploadedInfo.Digest != stream.info.Digest {
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, stream.info.Digest, uploadedInfo.Digest)
}
if digestingReader.validationSucceeded {
if err := compressionStep.recordValidatedDigestData(ic.c, uploadedInfo, srcInfo, encryptionStep, decryptionStep); err != nil {
return types.BlobInfo{}, err
}
}
return uploadedInfo, nil
}

// sourceStream encapsulates an input consumed by copyBlobFromStream, in progress of being built.
// This allows handlers of individual aspects to build the copy pipeline without _too much_
// specific cooperation by the caller.
//
// We are currently very far from a generalized plug-and-play API for building/consuming the pipeline
// without specific knowledge of various aspects in copyBlobFromStream; that may come one day.
type sourceStream struct {
reader io.Reader
info types.BlobInfo // corresponding to the data available in reader.
}

// errorAnnotationReader wraps the io.Reader passed to PutBlob, to annotate errors that happen during a read.
// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination.
type errorAnnotationReader struct {
reader io.Reader
}

// Read annotates any error that happens during a read.
func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
n, err = r.reader.Read(b)
if err != io.EOF {
return n, errors.Wrapf(err, "happened during read")
}
return n, err
}
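The new file above assembles the copy as a chain of io.Readers, with digest validation as the innermost wrapper so every byte that reaches the destination has been hashed. A self-contained sketch of that reader-pipeline pattern, using github.com/opencontainers/go-digest (already a dependency of this module); verifyingCopy is an illustrative name, not an API added by this PR:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/opencontainers/go-digest"
)

// verifyingCopy streams src to dst while hashing everything it reads,
// mirroring how copyBlobFromStream wraps stream.reader in a digestingReader
// before the rest of the pipeline is layered on top.
func verifyingCopy(dst io.Writer, src io.Reader, expected digest.Digest) error {
	verifier := expected.Verifier() // an io.Writer that hashes what it sees
	if _, err := io.Copy(dst, io.TeeReader(src, verifier)); err != nil {
		return err
	}
	if !verifier.Verified() {
		return fmt.Errorf("digest mismatch: expected %s", expected)
	}
	return nil
}

func main() {
	blob := []byte("example layer data")
	want := digest.FromBytes(blob)
	var out bytes.Buffer
	if err := verifyingCopy(&out, bytes.NewReader(blob), want); err != nil {
		panic(err)
	}
	fmt.Println("verified", want)
}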

vendor/github.com/containers/image/v5/copy/compression.go (new file, generated, vendored, 320 lines)

@@ -0,0 +1,320 @@
package copy

import (
"io"

internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/pkg/compression"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step.
type bpDetectCompressionStepData struct {
isCompressed bool
format compressiontypes.Algorithm // Valid if isCompressed
decompressor compressiontypes.DecompressorFunc // Valid if isCompressed
srcCompressorName string // Compressor name to possibly record in the blob info cache for the source blob.
}

// blobPipelineDetectCompressionStep updates *stream to detect its current compression format.
// srcInfo is only used for error messages.
// Returns data for other steps.
func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobInfo) (bpDetectCompressionStepData, error) {
// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
format, decompressor, reader, err := compression.DetectCompressionFormat(stream.reader) // We could skip this in some cases, but let's keep the code path uniform
if err != nil {
return bpDetectCompressionStepData{}, errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
}
stream.reader = reader
res := bpDetectCompressionStepData{
isCompressed: decompressor != nil,
format: format,
decompressor: decompressor,
}
if res.isCompressed {
res.srcCompressorName = format.Name()
} else {
res.srcCompressorName = internalblobinfocache.Uncompressed
}
if expectedFormat, known := expectedCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.Name() != expectedFormat.Name() {
logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedFormat.Name(), format.Name())
}
return res, nil
}

// bpCompressionStepData contains data that the copy pipeline needs about the compression step.
type bpCompressionStepData struct {
operation types.LayerCompression // Operation to use for updating the blob metadata.
uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
uploadedAnnotations map[string]string // Annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
srcCompressorName string // Compressor name to record in the blob info cache for the source blob.
uploadedCompressorName string // Compressor name to record in the blob info cache for the uploaded blob.
closers []io.Closer // Objects to close after the upload is done, if any.
}

// blobPipelineCompressionStep updates *stream to compress and/or decompress it.
// srcInfo is primarily used for error messages.
// Returns data for other steps; the caller should eventually call updateCompressionEdits and perhaps recordValidatedDigestData,
// and must eventually call close.
func (ic *imageCopier) blobPipelineCompressionStep(stream *sourceStream, canModifyBlob bool, srcInfo types.BlobInfo,
detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
// WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
// short-circuit conditions
layerCompressionChangeSupported := ic.src.CanChangeLayerCompression(stream.info.MediaType)
if !layerCompressionChangeSupported {
logrus.Debugf("Compression change for blob %s (%q) not supported", srcInfo.Digest, stream.info.MediaType)
}
if canModifyBlob && layerCompressionChangeSupported {
for _, fn := range []func(*sourceStream, bpDetectCompressionStepData) (*bpCompressionStepData, error){
ic.bpcPreserveEncrypted,
ic.bpcCompressUncompressed,
ic.bpcRecompressCompressed,
ic.bpcDecompressCompressed,
} {
res, err := fn(stream, detected)
if err != nil {
return nil, err
}
if res != nil {
return res, nil
}
}
}
return ic.bpcPreserveOriginal(stream, detected, layerCompressionChangeSupported), nil
}

// bpcPreserveEncrypted checks if the input is encrypted, and returns a *bpCompressionStepData if so.
func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectCompressionStepData) (*bpCompressionStepData, error) {
if isOciEncrypted(stream.info.MediaType) {
logrus.Debugf("Using original blob without modification for encrypted blob")
// PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted
return &bpCompressionStepData{
operation: types.PreserveOriginal,
uploadedAlgorithm: nil,
srcCompressorName: internalblobinfocache.UnknownCompression,
uploadedCompressorName: internalblobinfocache.UnknownCompression,
}, nil
}
return nil, nil
}

// bpcCompressUncompressed checks if we should be compressing an uncompressed input, and returns a *bpCompressionStepData if so.
func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
if ic.c.dest.DesiredLayerCompression() == types.Compress && !detected.isCompressed {
logrus.Debugf("Compressing blob on the fly")
var uploadedAlgorithm *compressiontypes.Algorithm
if ic.c.compressionFormat != nil {
uploadedAlgorithm = ic.c.compressionFormat
} else {
uploadedAlgorithm = defaultCompressionFormat
}
reader, annotations := ic.c.compressedStream(stream.reader, *uploadedAlgorithm)
// Note: reader must be closed on all return paths.
stream.reader = reader
stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
Digest: "",
Size: -1,
}
return &bpCompressionStepData{
operation: types.Compress,
uploadedAlgorithm: uploadedAlgorithm,
uploadedAnnotations: annotations,
srcCompressorName: detected.srcCompressorName,
uploadedCompressorName: uploadedAlgorithm.Name(),
closers: []io.Closer{reader},
}, nil
}
return nil, nil
}

// bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so.
func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed &&
ic.c.compressionFormat != nil && ic.c.compressionFormat.Name() != detected.format.Name() {
// When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
// re-compressed using the desired format.
logrus.Debugf("Blob will be converted")
decompressed, err := detected.decompressor(stream.reader)
if err != nil {
return nil, err
}
succeeded := false
defer func() {
if !succeeded {
decompressed.Close()
}
}()
recompressed, annotations := ic.c.compressedStream(decompressed, *ic.c.compressionFormat)
// Note: recompressed must be closed on all return paths.
stream.reader = recompressed
stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
Digest: "",
Size: -1,
}
succeeded = true
return &bpCompressionStepData{
operation: types.PreserveOriginal,
uploadedAlgorithm: ic.c.compressionFormat,
uploadedAnnotations: annotations,
srcCompressorName: detected.srcCompressorName,
uploadedCompressorName: ic.c.compressionFormat.Name(),
closers: []io.Closer{decompressed, recompressed},
}, nil
}
return nil, nil
}

// bpcDecompressCompressed checks if we should be decompressing a compressed input, and returns a *bpCompressionStepData if so.
func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
if ic.c.dest.DesiredLayerCompression() == types.Decompress && detected.isCompressed {
logrus.Debugf("Blob will be decompressed")
s, err := detected.decompressor(stream.reader)
if err != nil {
return nil, err
}
// Note: s must be closed on all return paths.
stream.reader = s
stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
Digest: "",
Size: -1,
}
return &bpCompressionStepData{
operation: types.Decompress,
uploadedAlgorithm: nil,
srcCompressorName: detected.srcCompressorName,
uploadedCompressorName: internalblobinfocache.Uncompressed,
closers: []io.Closer{s},
}, nil
}
return nil, nil
}

// bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob.
func (ic *imageCopier) bpcPreserveOriginal(stream *sourceStream, detected bpDetectCompressionStepData,
layerCompressionChangeSupported bool) *bpCompressionStepData {
logrus.Debugf("Using original blob without modification")
// Remember if the original blob was compressed, and if so how, so that if
// LayerInfosForCopy() returned something that differs from what was in the
// source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(),
// it will be able to correctly derive the MediaType for the copied blob.
//
// But don't touch blobs in objects where we can't change compression,
// so that src.UpdatedImage() doesn't fail; assume that for such blobs
// LayerInfosForCopy() should not be making any changes in the first place.
var algorithm *compressiontypes.Algorithm
if layerCompressionChangeSupported && detected.isCompressed {
algorithm = &detected.format
} else {
algorithm = nil
}
return &bpCompressionStepData{
operation: types.PreserveOriginal,
uploadedAlgorithm: algorithm,
srcCompressorName: detected.srcCompressorName,
uploadedCompressorName: detected.srcCompressorName,
}
}

// updateCompressionEdits sets *operation, *algorithm and updates *annotations, if necessary.
func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCompression, algorithm **compressiontypes.Algorithm, annotations *map[string]string) {
*operation = d.operation
// If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
*algorithm = d.uploadedAlgorithm
if *annotations == nil {
*annotations = map[string]string{}
}
for k, v := range d.uploadedAnnotations {
(*annotations)[k] = v
}
}

// recordValidatedDigestData updates c.blobInfoCache with data about the created uploadedInfo and the original srcInfo.
// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties.
func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo,
encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error {
// Don't record any associations that involve encrypted data. This is a bit crude,
// some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
// might be safe, but it's not trivially obvious, so let's be conservative for now.
// This crude approach also means we don't need to record whether a blob is encrypted
// in the blob info cache (which would probably be necessary for any more complex logic),
// and the simplicity is attractive.
if !encryptionStep.encrypting && !decryptionStep.decrypting {
// If d.operation != types.PreserveOriginal, we now have two reliable digest values:
// srcinfo.Digest describes the pre-d.operation input, verified by digestingReader
// uploadedInfo.Digest describes the post-d.operation output, computed by PutBlob
// (because stream.info.Digest == "", this must have been computed afresh).
switch d.operation {
case types.PreserveOriginal:
break // Do nothing, we have only one digest and we might not have even verified it.
case types.Compress:
c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
case types.Decompress:
c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
default:
return errors.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
}
}
if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
}
if srcInfo.Digest != "" && d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
}
return nil
}

// close closes objects that carry state throughout the compression/decompression operation.
func (d *bpCompressionStepData) close() {
for _, c := range d.closers {
c.Close()
}
}

// doCompression reads all input from src and writes its compressed equivalent to dest.
func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error {
compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
if err != nil {
return err
}
buf := make([]byte, compressionBufferSize)
_, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close()
if err != nil {
compressor.Close()
return err
}
return compressor.Close()
}

// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
err := errors.New("Internal error: unexpected panic in compressGoroutine")
defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
_ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
}()
err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel)
}

// compressedStream returns a stream with the contents of the input reader compressed using algorithm, and a metadata map.
// The caller must close the returned reader.
// AFTER the stream is consumed, the metadata map will be updated with annotations to use on the data.
func (c *copier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) {
pipeReader, pipeWriter := io.Pipe()
annotations := map[string]string{}
// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
// we don't care.
go c.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter
return pipeReader, annotations
}

vendor/github.com/containers/image/v5/copy/copy.go (generated, vendored)

@@ -12,8 +12,8 @@ import (
 	"time"

 	"github.com/containers/image/v5/docker/reference"
-	"github.com/containers/image/v5/image"
 	internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
+	"github.com/containers/image/v5/internal/image"
 	"github.com/containers/image/v5/internal/imagedestination"
 	"github.com/containers/image/v5/internal/imagesource"
 	"github.com/containers/image/v5/internal/pkg/platform"
@@ -25,7 +25,6 @@ import (
 	"github.com/containers/image/v5/signature"
 	"github.com/containers/image/v5/transports"
 	"github.com/containers/image/v5/types"
-	"github.com/containers/ocicrypt"
 	encconfig "github.com/containers/ocicrypt/config"
 	digest "github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -82,7 +81,7 @@ type copier struct {
 type imageCopier struct {
 	c                          *copier
 	manifestUpdates            *types.ManifestUpdateOptions
-	src                        types.Image
+	src                        *image.SourcedImage
 	diffIDsAreNeeded           bool
 	cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
 	canSubstituteBlobs         bool
@@ -349,13 +348,8 @@ func supportsMultipleImages(dest types.ImageDestination) bool {
 // compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the
 // (possibly remote) destination). Returning true and the destination's manifest, type and digest if they compare equal.
-func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src types.Image, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
-	srcManifest, _, err := src.Manifest(ctx)
-	if err != nil {
-		return false, nil, "", "", errors.Wrapf(err, "reading manifest from image")
-	}
-	srcManifestDigest, err := manifest.Digest(srcManifest)
+func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src *image.SourcedImage, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
+	srcManifestDigest, err := manifest.Digest(src.ManifestBlob)
 	if err != nil {
 		return false, nil, "", "", errors.Wrapf(err, "calculating manifest digest")
 	}
@@ -620,11 +614,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 	if named := c.dest.Reference().DockerReference(); named != nil {
 		if digested, ok := named.(reference.Digested); ok {
 			destIsDigestedReference = true
-			sourceManifest, _, err := src.Manifest(ctx)
-			if err != nil {
-				return nil, "", "", errors.Wrapf(err, "reading manifest from source image")
-			}
-			matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest())
+			matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest())
 			if err != nil {
 				return nil, "", "", errors.Wrapf(err, "computing digest of source image's manifest")
 			}
@@ -688,7 +678,9 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 		cannotModifyManifestReason: cannotModifyManifestReason,
 		ociEncryptLayers:           options.OciEncryptLayers,
 	}
-	// Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
+	// Decide whether we can substitute blobs with semantic equivalents:
+	// - Don't do that if we can't modify the manifest at all
+	// - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
 	// This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
 	// The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
 	// We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there's a risk
@@ -702,12 +694,23 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 	destRequiresOciEncryption := (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || options.OciEncryptLayers != nil

-	// We compute preferredManifestMIMEType only to show it in error messages.
-	// Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
-	preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType, destRequiresOciEncryption)
+	manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{
+		srcMIMEType:                    ic.src.ManifestMIMEType,
+		destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(),
+		forceManifestMIMEType:          options.ForceManifestMIMEType,
+		requiresOCIEncryption:          destRequiresOciEncryption,
+		cannotModifyManifestReason:     ic.cannotModifyManifestReason,
+	})
 	if err != nil {
 		return nil, "", "", err
 	}
+	// We set up this part of ic.manifestUpdates quite early, not just around the
+	// code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code
+	// (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based
+	// on the expected destination format.
+	if manifestConversionPlan.preferredMIMETypeNeedsConversion {
+		ic.manifestUpdates.ManifestMIMEType = manifestConversionPlan.preferredMIMEType
+	}

 	// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
 	ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
@@ -742,22 +745,22 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 	// So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if
 	// we're altering how they're compressed. If the process succeeds, fine…
 	manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
-	retManifestType = preferredManifestMIMEType
+	retManifestType = manifestConversionPlan.preferredMIMEType
 	if err != nil {
-		logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
+		logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err)
 		// … if it fails, and the failure is either because the manifest is rejected by the registry, or
 		// because we failed to create a manifest of the specified type because the specific manifest type
 		// doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may
 		// have other options available that could still succeed.
 		_, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError)
 		_, isCompressionIncompatible := errors.Cause(err).(manifest.ManifestLayerCompressionIncompatibilityError)
-		if (!isManifestRejected && !isCompressionIncompatible) || len(otherManifestMIMETypeCandidates) == 0 {
+		if (!isManifestRejected && !isCompressionIncompatible) || len(manifestConversionPlan.otherMIMETypeCandidates) == 0 {
 			// We don't have other options.
 			// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
 			// Don't bother the user with MIME types if we have no choice.
 			return nil, "", "", err
 		}
-		// If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType.
+		// If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType.
 		// So if we are here, we will definitely be trying to convert the manifest.
 		// With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason,
 		// so let's bail out early and with a better error message.
@@ -766,8 +769,8 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 		}

 		// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
-		errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)}
-		for _, manifestMIMEType := range otherManifestMIMETypeCandidates {
+		errs := []string{fmt.Sprintf("%s(%v)", manifestConversionPlan.preferredMIMEType, err)}
+		for _, manifestMIMEType := range manifestConversionPlan.otherMIMETypeCandidates {
 			logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
 			ic.manifestUpdates.ManifestMIMEType = manifestMIMEType
 			attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
@@ -908,11 +911,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
 	// The manifest is used to extract the information whether a given
 	// layer is empty.
-	manifestBlob, manifestType, err := ic.src.Manifest(ctx)
-	if err != nil {
-		return err
-	}
-	man, err := manifest.FromBlob(manifestBlob, manifestType)
+	man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType)
 	if err != nil {
 		return err
 	}
@@ -1022,7 +1021,7 @@
 // stores the resulting config and manifest to the destination, and returns the stored manifest
 // and its digest.
 func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) {
-	pendingImage := ic.src
+	var pendingImage types.Image = ic.src
 	if !ic.noPendingManifestUpdates() {
 		if ic.cannotModifyManifestReason != "" {
 			return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason)
@@ -1047,7 +1046,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
 		return nil, "", errors.Wrap(err, "reading manifest")
 	}

-	if err := ic.c.copyConfig(ctx, pendingImage); err != nil {
+	if err := ic.copyConfig(ctx, pendingImage); err != nil {
 		return nil, "", err
 	}
@@ -1067,19 +1066,19 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
 }

 // copyConfig copies config.json, if any, from src to dest.
-func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
+func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error {
 	srcInfo := src.ConfigInfo()
 	if srcInfo.Digest != "" {
-		if err := c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
+		if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
 			// This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
 			return fmt.Errorf("copying config: %w", err)
 		}
-		defer c.concurrentBlobCopiesSemaphore.Release(1)
+		defer ic.c.concurrentBlobCopiesSemaphore.Release(1)

 		destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
-			progressPool := c.newProgressPool()
+			progressPool := ic.c.newProgressPool()
 			defer progressPool.Wait()
-			bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done")
+			bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
 			defer bar.Abort(false)

 			configBlob, err := src.ConfigBlob(ctx)
@@ -1087,7 +1086,7 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
 				return types.BlobInfo{}, errors.Wrapf(err, "reading config blob %s", srcInfo.Digest)
 			}

-			destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1, false)
+			destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false)
 			if err != nil {
 				return types.BlobInfo{}, err
 			}
@ -1146,6 +1145,10 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// Don't read the layer from the source if we already have the blob, and optimizations are acceptable. // Don't read the layer from the source if we already have the blob, and optimizations are acceptable.
if canAvoidProcessingCompleteLayer { if canAvoidProcessingCompleteLayer {
canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType)
logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
canSubstitute := ic.canSubstituteBlobs && ic.src.CanChangeLayerCompression(srcInfo.MediaType)
// TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm // TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
// that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing // that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
// a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause // a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
@ -1154,7 +1157,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// the ImageDestination interface lets us pass in. // the ImageDestination interface lets us pass in.
reused, blobInfo, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{ reused, blobInfo, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
Cache: ic.c.blobInfoCache, Cache: ic.c.blobInfoCache,
CanSubstitute: ic.canSubstituteBlobs, CanSubstitute: canSubstitute,
EmptyLayer: emptyLayer, EmptyLayer: emptyLayer,
LayerIndex: &layerIndex, LayerIndex: &layerIndex,
SrcRef: srcRef, SrcRef: srcRef,
@ -1303,7 +1306,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
} }
} }
blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.cannotModifyManifestReason == "", false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success blobInfo, err := ic.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success
return blobInfo, diffIDChan, err return blobInfo, diffIDChan, err
// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
} }
@ -1333,350 +1336,3 @@ func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorF
return digest.Canonical.FromReader(stream) return digest.Canonical.FromReader(stream)
} }
// errorAnnotationReader wraps the io.Reader passed to PutBlob for annotating any error that happens during read.
// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination.
type errorAnnotationReader struct {
reader io.Reader
}
// Read annotates any error that happened during the read
func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
n, err = r.reader.Read(b)
if err != io.EOF {
return n, errors.Wrapf(err, "happened during read")
}
return n, err
}
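The pattern being removed here is small enough to restate standalone. A minimal sketch using only the standard library (errAnnotatingReader and the sample input are illustrative names, not part of this codebase); note that io.EOF must pass through unwrapped or callers like io.ReadAll would never terminate cleanly:

package main

import (
	"fmt"
	"io"
	"strings"
)

// errAnnotatingReader mirrors errorAnnotationReader above: it passes reads
// through and wraps any non-EOF error so its true origin stays visible.
type errAnnotatingReader struct {
	reader io.Reader
}

func (r errAnnotatingReader) Read(b []byte) (n int, err error) {
	n, err = r.reader.Read(b)
	if err != nil && err != io.EOF {
		return n, fmt.Errorf("happened during read: %w", err)
	}
	return n, err
}

func main() {
	_, err := io.ReadAll(errAnnotatingReader{reader: strings.NewReader("ok")})
	fmt.Println(err) // <nil>: io.EOF is passed through unwrapped, so ReadAll terminates
}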
// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
// perhaps (de/re/)compressing it if canModifyBlob,
// and returns a complete blobInfo of the copied blob.
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer,
canModifyBlob bool, isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
if isConfig { // This is guaranteed by the caller, but set it here to be explicit.
canModifyBlob = false
}
// The copying happens through a pipeline of connected io.Readers.
// === Input: srcStream
// === Process input through digestingReader to validate against the expected digest.
// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
// use a separate validation failure indicator.
// Note that for this check we don't use the stronger "validationSucceeded" indicator, because
// dest.PutBlob may detect that the layer already exists, in which case we don't
// read stream to the end, and validation does not happen.
digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "preparing to verify blob %s", srcInfo.Digest)
}
var destStream io.Reader = digestingReader
// === Update progress bars
destStream = bar.ProxyReader(destStream)
// === Decrypt the stream, if required.
var decrypted bool
if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil {
newDesc := imgspecv1.Descriptor{
Annotations: srcInfo.Annotations,
}
var d digest.Digest
destStream, d, err = ocicrypt.DecryptLayer(c.ociDecryptConfig, destStream, newDesc, false)
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "decrypting layer %s", srcInfo.Digest)
}
srcInfo.Digest = d
srcInfo.Size = -1
for k := range srcInfo.Annotations {
if strings.HasPrefix(k, "org.opencontainers.image.enc") {
delete(srcInfo.Annotations, k)
}
}
decrypted = true
}
// === Detect compression of the input stream.
// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
compressionFormat, decompressor, destStream, err := compression.DetectCompressionFormat(destStream) // We could skip this in some cases, but let's keep the code path uniform
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
}
isCompressed := decompressor != nil
if expectedCompressionFormat, known := expectedCompressionFormats[srcInfo.MediaType]; known && isCompressed && compressionFormat.Name() != expectedCompressionFormat.Name() {
logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedCompressionFormat.Name(), compressionFormat.Name())
}
// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
if getOriginalLayerCopyWriter != nil {
destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor))
originalLayerReader = destStream
}
compressionMetadata := map[string]string{}
// === Deal with layer compression/decompression if necessary
// WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
// short-circuit conditions
var inputInfo types.BlobInfo
var compressionOperation types.LayerCompression
var uploadCompressionFormat *compressiontypes.Algorithm
srcCompressorName := internalblobinfocache.Uncompressed
if isCompressed {
srcCompressorName = compressionFormat.Name()
}
var uploadCompressorName string
if canModifyBlob && isOciEncrypted(srcInfo.MediaType) {
// PreserveOriginal: compression cannot be applied to an encrypted blob unless it is decrypted first
logrus.Debugf("Using original blob without modification for encrypted blob")
compressionOperation = types.PreserveOriginal
inputInfo = srcInfo
srcCompressorName = internalblobinfocache.UnknownCompression
uploadCompressionFormat = nil
uploadCompressorName = internalblobinfocache.UnknownCompression
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
logrus.Debugf("Compressing blob on the fly")
compressionOperation = types.Compress
pipeReader, pipeWriter := io.Pipe()
defer pipeReader.Close()
if c.compressionFormat != nil {
uploadCompressionFormat = c.compressionFormat
} else {
uploadCompressionFormat = defaultCompressionFormat
}
// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
// we don't care.
go c.compressGoroutine(pipeWriter, destStream, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter
destStream = pipeReader
inputInfo.Digest = ""
inputInfo.Size = -1
uploadCompressorName = uploadCompressionFormat.Name()
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed &&
c.compressionFormat != nil && c.compressionFormat.Name() != compressionFormat.Name() {
// When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
// re-compressed using the desired format.
logrus.Debugf("Blob will be converted")
compressionOperation = types.PreserveOriginal
s, err := decompressor(destStream)
if err != nil {
return types.BlobInfo{}, err
}
defer s.Close()
pipeReader, pipeWriter := io.Pipe()
defer pipeReader.Close()
uploadCompressionFormat = c.compressionFormat
go c.compressGoroutine(pipeWriter, s, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter
destStream = pipeReader
inputInfo.Digest = ""
inputInfo.Size = -1
uploadCompressorName = uploadCompressionFormat.Name()
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
logrus.Debugf("Blob will be decompressed")
compressionOperation = types.Decompress
s, err := decompressor(destStream)
if err != nil {
return types.BlobInfo{}, err
}
defer s.Close()
destStream = s
inputInfo.Digest = ""
inputInfo.Size = -1
uploadCompressionFormat = nil
uploadCompressorName = internalblobinfocache.Uncompressed
} else {
// PreserveOriginal might also need to recompress the original blob if the desired compression format is different.
logrus.Debugf("Using original blob without modification")
compressionOperation = types.PreserveOriginal
inputInfo = srcInfo
// Remember if the original blob was compressed, and if so how, so that if
// LayerInfosForCopy() returned something that differs from what was in the
// source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(),
// it will be able to correctly derive the MediaType for the copied blob.
if isCompressed {
uploadCompressionFormat = &compressionFormat
} else {
uploadCompressionFormat = nil
}
uploadCompressorName = srcCompressorName
}
// === Encrypt the stream for valid mediatypes if ociEncryptConfig provided
var (
encrypted bool
finalizer ocicrypt.EncryptLayerFinalizer
)
if toEncrypt {
if decrypted {
return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy")
}
if !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil {
var annotations map[string]string
if !decrypted {
annotations = srcInfo.Annotations
}
desc := imgspecv1.Descriptor{
MediaType: srcInfo.MediaType,
Digest: srcInfo.Digest,
Size: srcInfo.Size,
Annotations: annotations,
}
s, fin, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, destStream, desc)
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "encrypting blob %s", srcInfo.Digest)
}
destStream = s
finalizer = fin
inputInfo.Digest = ""
inputInfo.Size = -1
encrypted = true
}
}
// === Report progress using the c.progress channel, if required.
if c.progress != nil && c.progressInterval > 0 {
progressReader := newProgressReader(
destStream,
c.progress,
c.progressInterval,
srcInfo,
)
defer progressReader.reportDone()
destStream = progressReader
}
// === Finally, send the layer stream to dest.
options := private.PutBlobOptions{
Cache: c.blobInfoCache,
IsConfig: isConfig,
EmptyLayer: emptyLayer,
}
if !isConfig {
options.LayerIndex = &layerIndex
}
uploadedInfo, err := c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "writing blob")
}
uploadedInfo.Annotations = srcInfo.Annotations
uploadedInfo.CompressionOperation = compressionOperation
// If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
uploadedInfo.CompressionAlgorithm = uploadCompressionFormat
if decrypted {
uploadedInfo.CryptoOperation = types.Decrypt
} else if encrypted {
encryptAnnotations, err := finalizer()
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Unable to finalize encryption")
}
uploadedInfo.CryptoOperation = types.Encrypt
if uploadedInfo.Annotations == nil {
uploadedInfo.Annotations = map[string]string{}
}
for k, v := range encryptAnnotations {
uploadedInfo.Annotations[k] = v
}
}
// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
// all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
// So, read everything from originalLayerReader, which will cause the rest to be
// sent there if we are not already at EOF.
if getOriginalLayerCopyWriter != nil {
logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
_, err := io.Copy(io.Discard, originalLayerReader)
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest)
}
}
if digestingReader.validationFailed { // Coverage: This should never happen.
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
}
if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
}
if digestingReader.validationSucceeded {
// Don't record any associations that involve encrypted data. This is a bit crude,
// some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
// might be safe, but it's not trivially obvious, so let's be conservative for now.
// This crude approach also means we don't need to record whether a blob is encrypted
// in the blob info cache (which would probably be necessary for any more complex logic),
// and the simplicity is attractive.
if !encrypted && !decrypted {
// If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
// srcInfo.Digest describes the pre-compressionOperation input, verified by digestingReader
// uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
// (because inputInfo.Digest == "", this must have been computed afresh).
switch compressionOperation {
case types.PreserveOriginal:
break // Do nothing, we have only one digest and we might not have even verified it.
case types.Compress:
c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
case types.Decompress:
c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
default:
return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
}
}
if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName)
}
if srcInfo.Digest != "" && srcCompressorName != "" && srcCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, srcCompressorName)
}
}
// Copy all the metadata generated by the compressor into the annotations.
if uploadedInfo.Annotations == nil {
uploadedInfo.Annotations = map[string]string{}
}
for k, v := range compressionMetadata {
uploadedInfo.Annotations[k] = v
}
return uploadedInfo, nil
}
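copyBlobFromStream is, at its core, a chain of connected io.Readers: validate the digest as bytes flow, then recompress through an io.Pipe fed by a goroutine. A compact, hedged sketch of that chain using only the standard library plus github.com/opencontainers/go-digest (all names here are illustrative, not this codebase's API):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	payload := "hello layer"
	expected := digest.FromString(payload)

	// Stage 1: validate the source bytes against the expected digest as they flow.
	verifier := expected.Verifier()
	var src io.Reader = io.TeeReader(strings.NewReader(payload), verifier)

	// Stage 2: compress on the fly through a pipe, as the Compress branch above does.
	pr, pw := io.Pipe()
	go func() {
		gz := gzip.NewWriter(pw)
		_, err := io.Copy(gz, src)
		if err == nil {
			err = gz.Close()
		}
		pw.CloseWithError(err) // CloseWithError(nil) behaves like Close()
	}()

	// Stage 3: the "destination" consumes the compressed stream.
	var uploaded bytes.Buffer
	if _, err := io.Copy(&uploaded, pr); err != nil {
		panic(err)
	}
	fmt.Println(verifier.Verified(), uploaded.Len() > 0) // true true
}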
// doCompression reads all input from src and writes its compressed equivalent to dest.
func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error {
compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
if err != nil {
return err
}
buf := make([]byte, compressionBufferSize)
_, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close()
if err != nil {
compressor.Close()
return err
}
return compressor.Close()
}
// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
err := errors.New("Internal error: unexpected panic in compressGoroutine")
defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
_ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
}()
err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel)
}
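The defer in compressGoroutine deserves a second look: err must be read when the deferred closure runs, not when defer is declared, which is exactly what the comment about lazy evaluation means. A tiny standalone illustration of the same shape (all names are ours):

package main

import (
	"errors"
	"fmt"
	"io"
)

func produce(dest *io.PipeWriter) {
	err := errors.New("internal error: unexpected panic in producer")
	defer func() {
		// err is read HERE, after the body ran; a plain
		// `defer dest.CloseWithError(err)` would capture the sentinel
		// value at defer time and always report a failure.
		dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
	}()
	_, err = io.WriteString(dest, "data")
}

func main() {
	pr, pw := io.Pipe()
	go produce(pw)
	b, err := io.ReadAll(pr)
	fmt.Println(string(b), err) // data <nil>
}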

View File

@ -1,24 +0,0 @@
package copy
import (
"strings"
"github.com/containers/image/v5/types"
)
// isOciEncrypted returns a bool indicating if a mediatype is encrypted
// This function will be moved to be part of OCI spec when adopted.
func isOciEncrypted(mediatype string) bool {
return strings.HasSuffix(mediatype, "+encrypted")
}
// isEncrypted checks if an image is encrypted
func isEncrypted(i types.Image) bool {
layers := i.LayerInfos()
for _, l := range layers {
if isOciEncrypted(l.MediaType) {
return true
}
}
return false
}

View File

@ -0,0 +1,129 @@
package copy
import (
"strings"
"github.com/containers/image/v5/types"
"github.com/containers/ocicrypt"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// isOciEncrypted returns a bool indicating if a mediatype is encrypted
// This function will be moved to be part of OCI spec when adopted.
func isOciEncrypted(mediatype string) bool {
return strings.HasSuffix(mediatype, "+encrypted")
}
// isEncrypted checks if an image is encrypted
func isEncrypted(i types.Image) bool {
layers := i.LayerInfos()
for _, l := range layers {
if isOciEncrypted(l.MediaType) {
return true
}
}
return false
}
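The detection is purely a media-type suffix check, so it is easy to exercise in isolation. A trivial standalone demo (the encrypted media type shown is the one commonly used by ocicrypt; treat it as an assumption here):

package main

import (
	"fmt"
	"strings"
)

// Mirrors isOciEncrypted above: encrypted layers advertise a "+encrypted" suffix.
func isOciEncrypted(mediatype string) bool {
	return strings.HasSuffix(mediatype, "+encrypted")
}

func main() {
	fmt.Println(isOciEncrypted("application/vnd.oci.image.layer.v1.tar+gzip+encrypted")) // true
	fmt.Println(isOciEncrypted("application/vnd.oci.image.layer.v1.tar+gzip"))           // false
}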
// bpDecryptionStepData contains data that the copy pipeline needs about the decryption step.
type bpDecryptionStepData struct {
decrypting bool // We are actually decrypting the stream
}
// blobPipelineDecryptionStep updates *stream to decrypt it, if necessary.
// srcInfo is only used for error messages.
// Returns data for other steps; the caller should eventually use updateCryptoOperation.
func (c *copier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) {
if isOciEncrypted(stream.info.MediaType) && c.ociDecryptConfig != nil {
desc := imgspecv1.Descriptor{
Annotations: stream.info.Annotations,
}
reader, decryptedDigest, err := ocicrypt.DecryptLayer(c.ociDecryptConfig, stream.reader, desc, false)
if err != nil {
return nil, errors.Wrapf(err, "decrypting layer %s", srcInfo.Digest)
}
stream.reader = reader
stream.info.Digest = decryptedDigest
stream.info.Size = -1
for k := range stream.info.Annotations {
if strings.HasPrefix(k, "org.opencontainers.image.enc") {
delete(stream.info.Annotations, k)
}
}
return &bpDecryptionStepData{
decrypting: true,
}, nil
}
return &bpDecryptionStepData{
decrypting: false,
}, nil
}
// updateCryptoOperation sets *operation, if necessary.
func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypto) {
if d.decrypting {
*operation = types.Decrypt
}
}
// bpEncryptionStepData contains data that the copy pipeline needs about the encryption step.
type bpEncryptionStepData struct {
encrypting bool // We are actually encrypting the stream
finalizer ocicrypt.EncryptLayerFinalizer
}
// blobPipelineEncryptionStep updates *stream to encrypt it, if required by toEncrypt.
// srcInfo is primarily used for error messages.
// Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations.
func (c *copier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo,
decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) {
if toEncrypt && !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil {
var annotations map[string]string
if !decryptionStep.decrypting {
annotations = srcInfo.Annotations
}
desc := imgspecv1.Descriptor{
MediaType: srcInfo.MediaType,
Digest: srcInfo.Digest,
Size: srcInfo.Size,
Annotations: annotations,
}
reader, finalizer, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, stream.reader, desc)
if err != nil {
return nil, errors.Wrapf(err, "encrypting blob %s", srcInfo.Digest)
}
stream.reader = reader
stream.info.Digest = ""
stream.info.Size = -1
return &bpEncryptionStepData{
encrypting: true,
finalizer: finalizer,
}, nil
}
return &bpEncryptionStepData{
encrypting: false,
}, nil
}
// updateCryptoOperationAndAnnotations sets *operation and updates *annotations, if necessary.
func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *types.LayerCrypto, annotations *map[string]string) error {
if !d.encrypting {
return nil
}
encryptAnnotations, err := d.finalizer()
if err != nil {
return errors.Wrap(err, "Unable to finalize encryption")
}
*operation = types.Encrypt
if *annotations == nil {
*annotations = map[string]string{}
}
for k, v := range encryptAnnotations {
(*annotations)[k] = v
}
return nil
}
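Both new steps share a two-phase shape: mutate the stream immediately, return a small data struct, and let the caller patch the resulting metadata once the upload is done. A generic, self-contained sketch of that shape (every name below is invented for illustration):

package main

import "fmt"

type stepData struct {
	active    bool
	finalizer func() (map[string]string, error) // produced while streaming, consumed afterwards
}

func runStep(enabled bool) *stepData {
	if !enabled {
		return &stepData{}
	}
	return &stepData{
		active:    true,
		finalizer: func() (map[string]string, error) { return map[string]string{"k": "v"}, nil },
	}
}

// updateAnnotations patches caller-owned metadata after the fact, like
// updateCryptoOperationAndAnnotations above.
func (d *stepData) updateAnnotations(annotations *map[string]string) error {
	if !d.active {
		return nil
	}
	extra, err := d.finalizer()
	if err != nil {
		return err
	}
	if *annotations == nil {
		*annotations = map[string]string{}
	}
	for k, v := range extra {
		(*annotations)[k] = v
	}
	return nil
}

func main() {
	d := runStep(true)
	var ann map[string]string
	if err := d.updateAnnotations(&ann); err != nil {
		panic(err)
	}
	fmt.Println(ann) // map[k:v]
}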

View File

@ -38,31 +38,50 @@ func (os *orderedSet) append(s string) {
} }
} }
// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest. // determineManifestConversionInputs contains the inputs for determineManifestConversion.
// Note that the conversion will only happen later, through ic.src.UpdatedImage type determineManifestConversionInputs struct {
// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified), srcMIMEType string // MIME type of the input manifest
// and a list of other possible alternatives, in order.
func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string, requiresOciEncryption bool) (string, []string, error) { destSupportedManifestMIMETypes []string // MIME types supported by the destination, per types.ImageDestination.SupportedManifestMIMETypes()
_, srcType, err := ic.src.Manifest(ctx)
if err != nil { // This should have been cached?! forceManifestMIMEType string // User's choice of forced manifest MIME type
return "", nil, errors.Wrap(err, "reading manifest") requiresOCIEncryption bool // Restrict to manifest formats that can support OCI encryption
} cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
}
// manifestConversionPlan contains the decisions made by determineManifestConversion.
type manifestConversionPlan struct {
// The preferred manifest MIME type (whether we are converting to it or using it unmodified).
// We compute this only to show it in error messages; without having to add this context
// in an error message, we would be happy enough to know only that no conversion is needed.
preferredMIMEType string
preferredMIMETypeNeedsConversion bool // True if using preferredMIMEType requires a conversion step.
otherMIMETypeCandidates []string // Other possible alternatives, in order
}
// determineManifestConversion returns a plan for what formats, and possibly conversions, to use based on in.
func determineManifestConversion(in determineManifestConversionInputs) (manifestConversionPlan, error) {
srcType := in.srcMIMEType
normalizedSrcType := manifest.NormalizedMIMEType(srcType) normalizedSrcType := manifest.NormalizedMIMEType(srcType)
if srcType != normalizedSrcType { if srcType != normalizedSrcType {
logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType) logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType)
srcType = normalizedSrcType srcType = normalizedSrcType
} }
if forceManifestMIMEType != "" { destSupportedManifestMIMETypes := in.destSupportedManifestMIMETypes
destSupportedManifestMIMETypes = []string{forceManifestMIMEType} if in.forceManifestMIMEType != "" {
destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType}
} }
if len(destSupportedManifestMIMETypes) == 0 && (!requiresOciEncryption || manifest.MIMETypeSupportsEncryption(srcType)) { if len(destSupportedManifestMIMETypes) == 0 && (!in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType)) {
return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions. return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions.
preferredMIMEType: srcType,
otherMIMETypeCandidates: []string{},
}, nil
} }
supportedByDest := map[string]struct{}{} supportedByDest := map[string]struct{}{}
for _, t := range destSupportedManifestMIMETypes { for _, t := range destSupportedManifestMIMETypes {
if !requiresOciEncryption || manifest.MIMETypeSupportsEncryption(t) { if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) {
supportedByDest[t] = struct{}{} supportedByDest[t] = struct{}{}
} }
} }
@ -79,13 +98,16 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp
if _, ok := supportedByDest[srcType]; ok { if _, ok := supportedByDest[srcType]; ok {
prioritizedTypes.append(srcType) prioritizedTypes.append(srcType)
} }
if ic.cannotModifyManifestReason != "" { if in.cannotModifyManifestReason != "" {
// We could also drop this check and have the caller // We could also drop this check and have the caller
// make the choice; it is already doing that to an extent, to improve error // make the choice; it is already doing that to an extent, to improve error
// messages. But it is nice to hide the “if we can't modify, do no conversion” // messages. But it is nice to hide the “if we can't modify, do no conversion”
// special case in here; the caller can then worry (or not) only about a good UI. // special case in here; the caller can then worry (or not) only about a good UI.
logrus.Debugf("We can't modify the manifest, hoping for the best...") logrus.Debugf("We can't modify the manifest, hoping for the best...")
return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying? return manifestConversionPlan{ // Take our chances - FIXME? Or should we fail without trying?
preferredMIMEType: srcType,
otherMIMETypeCandidates: []string{},
}, nil
} }
// Then use our list of preferred types. // Then use our list of preferred types.
@ -102,15 +124,17 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp
logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", ")) logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", "))
if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen. if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen.
return "", nil, errors.New("Internal error: no candidate MIME types") return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types")
} }
preferredType := prioritizedTypes.list[0] res := manifestConversionPlan{
if preferredType != srcType { preferredMIMEType: prioritizedTypes.list[0],
ic.manifestUpdates.ManifestMIMEType = preferredType otherMIMETypeCandidates: prioritizedTypes.list[1:],
} else { }
res.preferredMIMETypeNeedsConversion = res.preferredMIMEType != srcType
if !res.preferredMIMETypeNeedsConversion {
logrus.Debugf("... will first try using the original manifest unmodified") logrus.Debugf("... will first try using the original manifest unmodified")
} }
return preferredType, prioritizedTypes.list[1:], nil return res, nil
} }
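After this refactor the function is a pure decision: inputs in, plan out, no hidden state. The tail of that decision is simple enough to restate as a runnable miniature (a sketch with trimmed-down types; only the two manifest media-type strings are real, everything else is ours):

package main

import "fmt"

type plan struct {
	preferred       string
	needsConversion bool
	otherCandidates []string
}

// choose mirrors the tail of determineManifestConversion above: the first
// prioritized type wins, and conversion is needed only if it differs from src.
func choose(srcType string, prioritized []string) (plan, error) {
	if len(prioritized) == 0 {
		return plan{}, fmt.Errorf("internal error: no candidate MIME types")
	}
	p := plan{
		preferred:       prioritized[0],
		otherCandidates: prioritized[1:],
	}
	p.needsConversion = p.preferred != srcType
	return p, nil
}

func main() {
	p, err := choose(
		"application/vnd.docker.distribution.manifest.v2+json",
		[]string{
			"application/vnd.oci.image.manifest.v1+json",
			"application/vnd.docker.distribution.manifest.v2+json",
		},
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p) // the OCI type is preferred and requires a conversion
}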
// isMultiImage returns true if img is a list of images // isMultiImage returns true if img is a list of images

View File

@ -8,7 +8,7 @@ import (
"github.com/containers/image/v5/directory/explicitfilepath" "github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image" "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports" "github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types" "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
@ -140,8 +140,7 @@ func (ref dirReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
src := newImageSource(ref) return image.FromReference(ctx, sys, ref)
return image.FromSource(ctx, sys, src)
} }
// NewImageSource returns a types.ImageSource for this reference. // NewImageSource returns a types.ImageSource for this reference.
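The same NewImage boilerplate is deleted from several transports in this commit and replaced by the new internal helper. Presumably image.FromReference bundles that boilerplate roughly like this (a sketch under assumptions, not the verbatim implementation; it lives in the internal image package, and unlike some of the old per-transport copies it can close the source on failure):

package image

import (
	"context"

	"github.com/containers/image/v5/types"
)

// fromReferenceSketch: what FromReference plausibly does, in one place.
func fromReferenceSketch(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (types.ImageCloser, error) {
	src, err := ref.NewImageSource(ctx, sys)
	if err != nil {
		return nil, err
	}
	img, err := FromSource(ctx, sys, src)
	if err != nil {
		src.Close()
		return nil, err
	}
	return img, nil
}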

View File

@ -8,7 +8,7 @@ import (
"github.com/containers/image/v5/docker/internal/tarfile" "github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/docker/reference"
ctrImage "github.com/containers/image/v5/image" ctrImage "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports" "github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types" "github.com/containers/image/v5/types"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -185,11 +185,7 @@ func (ref archiveReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
src, err := newImageSource(ctx, sys, ref) return ctrImage.FromReference(ctx, sys, ref)
if err != nil {
return nil, err
}
return ctrImage.FromSource(ctx, sys, src)
} }
// NewImageSource returns a types.ImageSource for this reference. // NewImageSource returns a types.ImageSource for this reference.

View File

@ -6,7 +6,7 @@ import (
"github.com/containers/image/v5/docker/policyconfiguration" "github.com/containers/image/v5/docker/policyconfiguration"
"github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image" "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports" "github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types" "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
@ -195,11 +195,7 @@ func (ref daemonReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
src, err := newImageSource(ctx, sys, ref) return image.FromReference(ctx, sys, ref)
if err != nil {
return nil, err
}
return image.FromSource(ctx, sys, src)
} }
// NewImageSource returns a types.ImageSource for this reference. // NewImageSource returns a types.ImageSource for this reference.

View File

@ -164,7 +164,6 @@ func serverDefault() *tls.Config {
return &tls.Config{ return &tls.Config{
// Avoid fallback to SSL protocols < TLS1.0 // Avoid fallback to SSL protocols < TLS1.0
MinVersion: tls.VersionTLS10, MinVersion: tls.VersionTLS10,
PreferServerCipherSuites: true,
CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
} }
} }

View File

@ -9,7 +9,7 @@ import (
"strings" "strings"
"github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image" "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/manifest" "github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types" "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"

View File

@ -1,400 +1,14 @@
package image package image
import ( import (
"bytes" "github.com/containers/image/v5/internal/image"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
) )
// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) // GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is // This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
// a non-zero embedded timestamp; we could zero that, but that would just waste storage space // a non-zero embedded timestamp; we could zero that, but that would just waste storage space
// in registries, so let's use the same values. // in registries, so let's use the same values.
var GzippedEmptyLayer = []byte{ var GzippedEmptyLayer = image.GzippedEmptyLayer
31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
}
// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer // GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer
const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") const GzippedEmptyLayerDigest = image.GzippedEmptyLayerDigest
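Both constants can be checked mechanically. A small standalone program (the byte literal is copied from GzippedEmptyLayer above) hashes the compressed bytes and decompresses them back to 1024 NUL bytes; the printed digest should equal GzippedEmptyLayerDigest:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"

	"github.com/opencontainers/go-digest"
)

// Copied from GzippedEmptyLayer above.
var gzippedEmptyLayer = []byte{
	31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
	0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
}

func main() {
	fmt.Println(digest.FromBytes(gzippedEmptyLayer)) // should equal GzippedEmptyLayerDigest
	zr, err := gzip.NewReader(bytes.NewReader(gzippedEmptyLayer))
	if err != nil {
		panic(err)
	}
	payload, err := io.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(payload) == 1024, bytes.Equal(payload, make([]byte, 1024))) // true true
}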
type manifestSchema2 struct {
src types.ImageSource // May be nil if configBlob is not nil
configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
m *manifest.Schema2
}
func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
m, err := manifest.Schema2FromManifest(manifestBlob)
if err != nil {
return nil, err
}
return &manifestSchema2{
src: src,
m: m,
}, nil
}
// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 {
return &manifestSchema2{
src: src,
configBlob: configBlob,
m: manifest.Schema2FromComponents(config, layers),
}
}
func (m *manifestSchema2) serialize() ([]byte, error) {
return m.m.Serialize()
}
func (m *manifestSchema2) manifestMIMEType() string {
return m.m.MediaType
}
// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
return m.m.ConfigInfo()
}
// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
// layers in the resulting configuration isn't guaranteed to be returned to due how
// old image manifests work (docker v2s1 especially).
func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
configBlob, err := m.ConfigBlob(ctx)
if err != nil {
return nil, err
}
// docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
// than OCI v1. This unmarshal makes sure we drop docker v2s2
// fields that aren't needed in OCI v1.
configOCI := &imgspecv1.Image{}
if err := json.Unmarshal(configBlob, configOCI); err != nil {
return nil, err
}
return configOCI, nil
}
// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
// The result is cached; it is OK to call this however often you need.
func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.configBlob == nil {
if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
}
stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
if err != nil {
return nil, err
}
defer stream.Close()
blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
if err != nil {
return nil, err
}
computedDigest := digest.FromBytes(blob)
if computedDigest != m.m.ConfigDescriptor.Digest {
return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
}
m.configBlob = blob
}
return m.configBlob, nil
}
// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
}
// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
// It returns false if the manifest does not embed a Docker reference.
// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
return false
}
// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
getter := func(info types.BlobInfo) ([]byte, error) {
if info.Digest != m.ConfigInfo().Digest {
// Shouldn't ever happen
return nil, errors.New("asked for a different config blob")
}
config, err := m.ConfigBlob(ctx)
if err != nil {
return nil, err
}
return config, nil
}
return m.m.Inspect(getter)
}
// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
// (most importantly it forces us to download the full layers even if they are already present at the destination).
func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
return false
}
// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
// if the CompressionOperation and CompressionAlgorithm specified in one or more
// options.LayerInfos items is anything other than gzip.
func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
src: m.src,
configBlob: m.configBlob,
m: manifest.Schema2Clone(m.m),
}
converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1,
manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1,
})
if err != nil {
return nil, err
}
if converted != nil {
return converted, nil
}
// No conversion required, update manifest
if options.LayerInfos != nil {
if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
return nil, err
}
}
// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
return memoryImageFromManifest(&copy), nil
}
func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
return imgspecv1.Descriptor{
MediaType: d.MediaType,
Size: d.Size,
Digest: d.Digest,
URLs: d.URLs,
}
}
// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestSchema2 object.
func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) {
configOCI, err := m.OCIConfig(ctx)
if err != nil {
return nil, err
}
configOCIBytes, err := json.Marshal(configOCI)
if err != nil {
return nil, err
}
config := imgspecv1.Descriptor{
MediaType: imgspecv1.MediaTypeImageConfig,
Size: int64(len(configOCIBytes)),
Digest: digest.FromBytes(configOCIBytes),
}
layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
for idx := range layers {
layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
switch m.m.LayersDescriptors[idx].MediaType {
case manifest.DockerV2Schema2ForeignLayerMediaType:
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip:
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
case manifest.DockerV2SchemaLayerMediaTypeUncompressed:
layers[idx].MediaType = imgspecv1.MediaTypeImageLayer
case manifest.DockerV2Schema2LayerMediaType:
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip
default:
return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType)
}
}
return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil
}
// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestSchema2 object.
//
// Based on docker/distribution/manifest/schema1/config_builder.go
func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
dest := options.InformationOnly.Destination
var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil
if options.LayerInfos != nil {
if len(options.LayerInfos) != len(m.m.LayersDescriptors) {
return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
len(options.LayerInfos), len(m.m.LayersDescriptors))
}
convertedLayerUpdates = []types.BlobInfo{}
}
configBytes, err := m.ConfigBlob(ctx)
if err != nil {
return nil, err
}
imageConfig := &manifest.Schema2Image{}
if err := json.Unmarshal(configBytes, imageConfig); err != nil {
return nil, err
}
// Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
history := make([]manifest.Schema1History, len(imageConfig.History))
nonemptyLayerIndex := 0
var parentV1ID string // Set in the loop
v1ID := ""
haveGzippedEmptyLayer := false
if len(imageConfig.History) == 0 {
// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
}
for v2Index, historyEntry := range imageConfig.History {
parentV1ID = v1ID
v1Index := len(imageConfig.History) - 1 - v2Index
var blobDigest digest.Digest
if historyEntry.EmptyLayer {
emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}
if !haveGzippedEmptyLayer {
logrus.Debugf("Uploading empty layer during conversion to schema 1")
// Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
// and anyway this blob is so small that it's easier to just copy it than to worry about figuring out another location where to get it.
info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false)
if err != nil {
return nil, errors.Wrap(err, "uploading empty layer")
}
if info.Digest != emptyLayerBlobInfo.Digest {
return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest)
}
haveGzippedEmptyLayer = true
}
if options.LayerInfos != nil {
convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo)
}
blobDigest = emptyLayerBlobInfo.Digest
} else {
if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
}
if options.LayerInfos != nil {
convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex])
}
blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest
nonemptyLayerIndex++
}
// AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency.
v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID)
if err != nil {
return nil, err
}
v1ID = v
fakeImage := manifest.Schema1V1Compatibility{
ID: v1ID,
Parent: parentV1ID,
Comment: historyEntry.Comment,
Created: historyEntry.Created,
Author: historyEntry.Author,
ThrowAway: historyEntry.EmptyLayer,
}
fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
v1CompatibilityBytes, err := json.Marshal(&fakeImage)
if err != nil {
return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
}
fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)}
// Note that parentV1ID of the top layer is preserved when exiting this loop
}
// Now patch in real configuration for the top layer (v1Index == 0)
v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency.
if err != nil {
return nil, err
}
v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer)
if err != nil {
return nil, err
}
history[0].V1Compatibility = string(v1Config)
if options.LayerInfos != nil {
options.LayerInfos = convertedLayerUpdates
}
m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
if err != nil {
return nil, err // This should never happen, we should have created all the components correctly.
}
return m1, nil
}
func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) {
if err := blobDigest.Validate(); err != nil {
return "", err
}
parts := append([]string{blobDigest.Hex()}, others...)
v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
return hex.EncodeToString(v1IDHash[:]), nil
}
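The v1 ID chaining used by this conversion is compact enough to restate standalone: sha256 over the blob digest hex plus any extra components, joined by single spaces, with each layer chaining its parent's ID. A runnable sketch (the digest hex strings are made up):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

// v1ID mirrors v1IDFromBlobDigestAndComponents above, minus digest validation.
func v1ID(blobDigestHex string, others ...string) string {
	parts := append([]string{blobDigestHex}, others...)
	sum := sha256.Sum256([]byte(strings.Join(parts, " ")))
	return hex.EncodeToString(sum[:])
}

func main() {
	parent := v1ID("aaaa")            // hypothetical bottom layer
	fmt.Println(v1ID("bbbb", parent)) // a child chains its parent's ID
}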
func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
// Preserve everything we don't specifically know about.
// (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
rawContents := map[string]*json.RawMessage{}
if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
return nil, err
}
delete(rawContents, "rootfs")
delete(rawContents, "history")
updates := map[string]interface{}{"id": v1ID}
if parentV1ID != "" {
updates["parent"] = parentV1ID
}
if throwaway {
updates["throwaway"] = throwaway
}
for field, value := range updates {
encoded, err := json.Marshal(value)
if err != nil {
return nil, err
}
rawContents[field] = (*json.RawMessage)(&encoded)
}
return json.Marshal(rawContents)
}
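The *json.RawMessage trick above is a general preserve-unknown-fields pattern: unmarshal into a map of raw messages, patch only the keys you understand, and reserialize everything else byte-for-byte. A self-contained demo of the same round trip (the JSON document is invented):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Patch only "id", as v1ConfigFromConfigJSON above does for the schema1 config.
	in := []byte(`{"id":"old","os":"linux","custom":{"x":1}}`)
	raw := map[string]*json.RawMessage{}
	if err := json.Unmarshal(in, &raw); err != nil {
		panic(err)
	}
	newID, _ := json.Marshal("new")
	raw["id"] = (*json.RawMessage)(&newID)
	out, _ := json.Marshal(raw)
	fmt.Println(string(out)) // "custom" and "os" survive untouched
}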
// SupportsEncryption returns if encryption is supported for the manifest type
func (m *manifestSchema2) SupportsEncryption(context.Context) bool {
return false
}

View File

@ -6,17 +6,10 @@ package image
import ( import (
"context" "context"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types" "github.com/containers/image/v5/types"
) )
// imageCloser implements types.ImageCloser, perhaps allowing simple users
// to use a single object without having keep a reference to a types.ImageSource
// only to call types.ImageSource.Close().
type imageCloser struct {
types.Image
src types.ImageSource
}
// FromSource returns a types.ImageCloser implementation for the default instance of source. // FromSource returns a types.ImageCloser implementation for the default instance of source.
// If source is a manifest list, .Manifest() still returns the manifest list, // If source is a manifest list, .Manifest() still returns the manifest list,
// but other methods transparently return data from an appropriate image instance. // but other methods transparently return data from an appropriate image instance.
@ -31,33 +24,7 @@ type imageCloser struct {
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil)) return image.FromSource(ctx, sys, src)
if err != nil {
return nil, err
}
return &imageCloser{
Image: img,
src: src,
}, nil
}
func (ic *imageCloser) Close() error {
return ic.src.Close()
}
// sourcedImage is a general set of utilities for working with container images,
// whatever is their underlying location (i.e. dockerImageSource-independent).
// Note the existence of skopeo/docker.Image: some instances of a `types.Image`
// may not be a `sourcedImage` directly. However, most users of `types.Image`
// do not care, and those who care about `skopeo/docker.Image` know they do.
type sourcedImage struct {
*UnparsedImage
manifestBlob []byte
manifestMIMEType string
// genericManifest contains data corresponding to manifestBlob.
// NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest
// if you want to preserve the original manifest; use manifestBlob directly.
genericManifest
} }
// FromUnparsedImage returns a types.Image implementation for unparsed. // FromUnparsedImage returns a types.Image implementation for unparsed.
@ -66,39 +33,5 @@ type sourcedImage struct {
// //
// The Image must not be used after the underlying ImageSource is Close()d. // The Image must not be used after the underlying ImageSource is Close()d.
func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) { func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) {
// Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: return image.FromUnparsedImage(ctx, sys, unparsed)
// we want to be able to use unparsed.src. We could make that an explicit interface, but, well,
// this is the only UnparsedImage implementation around, anyway.
// NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest().
manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx)
if err != nil {
return nil, err
}
parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType)
if err != nil {
return nil, err
}
return &sourcedImage{
UnparsedImage: unparsed,
manifestBlob: manifestBlob,
manifestMIMEType: manifestMIMEType,
genericManifest: parsedManifest,
}, nil
}
// Size returns the size of the image as stored, if it's known, or -1 if it isn't.
func (i *sourcedImage) Size() (int64, error) {
return -1, nil
}
// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
func (i *sourcedImage) Manifest(ctx context.Context) ([]byte, string, error) {
return i.manifestBlob, i.manifestMIMEType, nil
}
func (i *sourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest)
} }

View File

@ -1,95 +1,19 @@
package image package image
import ( import (
"context" "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types" "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
) )
// UnparsedImage implements types.UnparsedImage. // UnparsedImage implements types.UnparsedImage.
// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. // An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
type UnparsedImage struct { type UnparsedImage = image.UnparsedImage
src types.ImageSource
instanceDigest *digest.Digest
cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
// A private cache for Manifest(), may be the empty string if guessing failed.
// Valid iff cachedManifest is not nil.
cachedManifestMIMEType string
cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known.
}
// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). // UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
// //
// The UnparsedImage must not be used after the underlying ImageSource is Close()d. // The UnparsedImage must not be used after the underlying ImageSource is Close()d.
func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
return &UnparsedImage{ return image.UnparsedInstance(src, instanceDigest)
src: src,
instanceDigest: instanceDigest,
}
}
// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
func (i *UnparsedImage) Reference() types.ImageReference {
// Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
return i.src.Reference()
}
// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
if i.cachedManifest == nil {
m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
if err != nil {
return nil, "", err
}
// ImageSource.GetManifest does not do digest verification, but we do;
// this immediately protects also any user of types.Image.
if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
matches, err := manifest.MatchesDigest(m, digest)
if err != nil {
return nil, "", errors.Wrap(err, "computing manifest digest")
}
if !matches {
return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
}
}
i.cachedManifest = m
i.cachedManifestMIMEType = mt
}
return i.cachedManifest, i.cachedManifestMIMEType, nil
}
// expectedManifestDigest returns a the expected value of the manifest digest, and an indicator whether it is known.
// The bool return value seems redundant with digest != ""; it is used explicitly
// to refuse (unexpected) situations when the digest exists but is "".
func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) {
if i.instanceDigest != nil {
return *i.instanceDigest, true
}
ref := i.Reference().DockerReference()
if ref != nil {
if canonical, ok := ref.(reference.Canonical); ok {
return canonical.Digest(), true
}
}
return "", false
}
// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
if i.cachedSignatures == nil {
sigs, err := i.src.GetSignatures(ctx, i.instanceDigest)
if err != nil {
return nil, err
}
i.cachedSignatures = sigs
}
return i.cachedSignatures, nil
} }
View File
@@ -246,3 +246,12 @@ func (m *manifestSchema1) convertToManifestOCI1(ctx context.Context, options *ty
func (m *manifestSchema1) SupportsEncryption(context.Context) bool {
	return false
}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+// to a different manifest format).
+func (m *manifestSchema1) CanChangeLayerCompression(mimeType string) bool {
+	return true // There are no MIME types in the manifest, so we must assume a valid image.
+}
View File
@@ -0,0 +1,413 @@
package image
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes).
// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
// in registries, so let's use the same values.
//
// This is publicly visible as c/image/image.GzippedEmptyLayer.
var GzippedEmptyLayer = []byte{
31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
}
// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer
//
// This is publicly visible as c/image/image.GzippedEmptyLayerDigest.
const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
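
The two constants above are mutually consistent; a minimal illustrative check, assuming a _test.go file next to this one in the same package (not part of this commit):

package image

import (
	"testing"

	"github.com/opencontainers/go-digest"
)

// TestGzippedEmptyLayerDigest verifies that GzippedEmptyLayerDigest is exactly
// the SHA-256 digest of the embedded GzippedEmptyLayer bytes.
func TestGzippedEmptyLayerDigest(t *testing.T) {
	if d := digest.FromBytes(GzippedEmptyLayer); d != GzippedEmptyLayerDigest {
		t.Fatalf("digest of GzippedEmptyLayer is %s, want %s", d, GzippedEmptyLayerDigest)
	}
}
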
type manifestSchema2 struct {
src types.ImageSource // May be nil if configBlob is not nil
configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
m *manifest.Schema2
}
func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
m, err := manifest.Schema2FromManifest(manifestBlob)
if err != nil {
return nil, err
}
return &manifestSchema2{
src: src,
m: m,
}, nil
}
// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data.
func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 {
return &manifestSchema2{
src: src,
configBlob: configBlob,
m: manifest.Schema2FromComponents(config, layers),
}
}
func (m *manifestSchema2) serialize() ([]byte, error) {
return m.m.Serialize()
}
func (m *manifestSchema2) manifestMIMEType() string {
return m.m.MediaType
}
// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
return m.m.ConfigInfo()
}
// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
// layers in the resulting configuration isn't guaranteed to be returned due to how
// old image manifests work (docker v2s1 especially).
func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
configBlob, err := m.ConfigBlob(ctx)
if err != nil {
return nil, err
}
// docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
// than OCI v1. This unmarshal makes sure we drop docker v2s2
// fields that aren't needed in OCI v1.
configOCI := &imgspecv1.Image{}
if err := json.Unmarshal(configBlob, configOCI); err != nil {
return nil, err
}
return configOCI, nil
}
// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
// The result is cached; it is OK to call this however often you need.
func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.configBlob == nil {
if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
}
stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
if err != nil {
return nil, err
}
defer stream.Close()
blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
if err != nil {
return nil, err
}
computedDigest := digest.FromBytes(blob)
if computedDigest != m.m.ConfigDescriptor.Digest {
return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
}
m.configBlob = blob
}
return m.configBlob, nil
}
// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
}
// EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
// It returns false if the manifest does not embed a Docker reference.
// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
return false
}
// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
getter := func(info types.BlobInfo) ([]byte, error) {
if info.Digest != m.ConfigInfo().Digest {
// Shouldn't ever happen
return nil, errors.New("asked for a different config blob")
}
config, err := m.ConfigBlob(ctx)
if err != nil {
return nil, err
}
return config, nil
}
return m.m.Inspect(getter)
}
// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
// This is a horribly specific interface, but InformationOnly.LayerDiffIDs can be very expensive to compute
// (most importantly it forces us to download the full layers even if they are already present at the destination).
func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
return false
}
// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
// if the CompressionOperation and CompressionAlgorithm specified in one or more
// options.LayerInfos items is anything other than gzip.
func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
src: m.src,
configBlob: m.configBlob,
m: manifest.Schema2Clone(m.m),
}
converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1,
manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1,
})
if err != nil {
return nil, err
}
if converted != nil {
return converted, nil
}
// No conversion required, update manifest
if options.LayerInfos != nil {
if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
return nil, err
}
}
// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
return memoryImageFromManifest(&copy), nil
}
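
For reference, a hedged caller-side sketch of driving the conversion path above (the helper is hypothetical, not in this commit): setting types.ManifestUpdateOptions.ManifestMIMEType to the OCI manifest type routes through convertToManifestOCI1 via the conversion map shown above.

package main

import (
	"context"

	"github.com/containers/image/v5/types"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// convertToOCI asks an existing types.Image for an OCI-manifest view of itself,
// leaving the layer set unchanged.
func convertToOCI(ctx context.Context, img types.Image) ([]byte, string, error) {
	updated, err := img.UpdatedImage(ctx, types.ManifestUpdateOptions{
		ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
	})
	if err != nil {
		return nil, "", err
	}
	return updated.Manifest(ctx)
}
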
func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
return imgspecv1.Descriptor{
MediaType: d.MediaType,
Size: d.Size,
Digest: d.Digest,
URLs: d.URLs,
}
}
// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestSchema2 object.
func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) {
configOCI, err := m.OCIConfig(ctx)
if err != nil {
return nil, err
}
configOCIBytes, err := json.Marshal(configOCI)
if err != nil {
return nil, err
}
config := imgspecv1.Descriptor{
MediaType: imgspecv1.MediaTypeImageConfig,
Size: int64(len(configOCIBytes)),
Digest: digest.FromBytes(configOCIBytes),
}
layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
for idx := range layers {
layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
switch m.m.LayersDescriptors[idx].MediaType {
case manifest.DockerV2Schema2ForeignLayerMediaType:
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip:
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
case manifest.DockerV2SchemaLayerMediaTypeUncompressed:
layers[idx].MediaType = imgspecv1.MediaTypeImageLayer
case manifest.DockerV2Schema2LayerMediaType:
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip
default:
return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType)
}
}
return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil
}
// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestSchema2 object.
//
// Based on docker/distribution/manifest/schema1/config_builder.go
func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
dest := options.InformationOnly.Destination
var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil
if options.LayerInfos != nil {
if len(options.LayerInfos) != len(m.m.LayersDescriptors) {
return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
len(options.LayerInfos), len(m.m.LayersDescriptors))
}
convertedLayerUpdates = []types.BlobInfo{}
}
configBytes, err := m.ConfigBlob(ctx)
if err != nil {
return nil, err
}
imageConfig := &manifest.Schema2Image{}
if err := json.Unmarshal(configBytes, imageConfig); err != nil {
return nil, err
}
// Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
history := make([]manifest.Schema1History, len(imageConfig.History))
nonemptyLayerIndex := 0
var parentV1ID string // Set in the loop
v1ID := ""
haveGzippedEmptyLayer := false
if len(imageConfig.History) == 0 {
// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
}
for v2Index, historyEntry := range imageConfig.History {
parentV1ID = v1ID
v1Index := len(imageConfig.History) - 1 - v2Index
var blobDigest digest.Digest
if historyEntry.EmptyLayer {
emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}
if !haveGzippedEmptyLayer {
logrus.Debugf("Uploading empty layer during conversion to schema 1")
// Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
// and anyway this blob is so small that it's easier to just copy it than to worry about figuring out another location where to get it.
info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false)
if err != nil {
return nil, errors.Wrap(err, "uploading empty layer")
}
if info.Digest != emptyLayerBlobInfo.Digest {
return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest)
}
haveGzippedEmptyLayer = true
}
if options.LayerInfos != nil {
convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo)
}
blobDigest = emptyLayerBlobInfo.Digest
} else {
if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
}
if options.LayerInfos != nil {
convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex])
}
blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest
nonemptyLayerIndex++
}
// AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency.
v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID)
if err != nil {
return nil, err
}
v1ID = v
fakeImage := manifest.Schema1V1Compatibility{
ID: v1ID,
Parent: parentV1ID,
Comment: historyEntry.Comment,
Created: historyEntry.Created,
Author: historyEntry.Author,
ThrowAway: historyEntry.EmptyLayer,
}
fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
v1CompatibilityBytes, err := json.Marshal(&fakeImage)
if err != nil {
return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
}
fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)}
// Note that parentV1ID of the top layer is preserved when exiting this loop
}
// Now patch in real configuration for the top layer (v1Index == 0)
v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency.
if err != nil {
return nil, err
}
v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer)
if err != nil {
return nil, err
}
history[0].V1Compatibility = string(v1Config)
if options.LayerInfos != nil {
options.LayerInfos = convertedLayerUpdates
}
m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
if err != nil {
return nil, err // This should never happen, we should have created all the components correctly.
}
return m1, nil
}
func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) {
if err := blobDigest.Validate(); err != nil {
return "", err
}
parts := append([]string{blobDigest.Hex()}, others...)
v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
return hex.EncodeToString(v1IDHash[:]), nil
}
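
The derivation above is just a SHA-256 over the space-joined components; a standalone restatement with hypothetical inputs:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

// The v1 ID is sha256 over "<blobDigestHex> <parentV1ID> [<configJSON>]",
// joined by single spaces. The values below are hypothetical placeholders.
func main() {
	blobHex := strings.Repeat("a", 64)    // hypothetical layer blob digest hex
	parentV1ID := strings.Repeat("b", 64) // hypothetical parent v1 ID
	sum := sha256.Sum256([]byte(strings.Join([]string{blobHex, parentV1ID}, " ")))
	fmt.Println(hex.EncodeToString(sum[:])) // the derived v1 ID
}
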
func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
// Preserve everything we don't specifically know about.
// (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
rawContents := map[string]*json.RawMessage{}
if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
return nil, err
}
delete(rawContents, "rootfs")
delete(rawContents, "history")
updates := map[string]interface{}{"id": v1ID}
if parentV1ID != "" {
updates["parent"] = parentV1ID
}
if throwaway {
updates["throwaway"] = throwaway
}
for field, value := range updates {
encoded, err := json.Marshal(value)
if err != nil {
return nil, err
}
rawContents[field] = (*json.RawMessage)(&encoded)
}
return json.Marshal(rawContents)
}
// SupportsEncryption returns if encryption is supported for the manifest type
func (m *manifestSchema2) SupportsEncryption(context.Context) bool {
return false
}
// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
// (and the code can handle that).
// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
// to a different manifest format).
func (m *manifestSchema2) CanChangeLayerCompression(mimeType string) bool {
return m.m.CanChangeLayerCompression(mimeType)
}
View File
@@ -12,9 +12,8 @@ import (
)

// genericManifest is an interface for parsing, modifying image manifests and related data.
-// Note that the public methods are intended to be a subset of types.Image
-// so that embedding a genericManifest into structs works.
-// will support v1 one day...
+// The public methods are related to types.Image so that embedding a genericManifest implements most of it,
+// but there are also public methods that are only visible by packages that can import c/image/internal/image.
type genericManifest interface {
	serialize() ([]byte, error)
	manifestMIMEType() string
@@ -51,6 +50,16 @@ type genericManifest interface {
	// the process of updating a manifest between different manifest types was to update then convert.
	// This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836
	SupportsEncryption(ctx context.Context) bool
+
+	// The following methods are not a part of types.Image:
+	// ===
+	// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+	// (and the code can handle that).
+	// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+	// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+	// to a different manifest format).
+	CanChangeLayerCompression(mimeType string) bool
}

// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src.
View File
@@ -7,6 +7,7 @@ import (
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/internal/iolimits"
+	internalManifest "github.com/containers/image/v5/internal/manifest"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/blobinfocache/none"
	"github.com/containers/image/v5/types"
@@ -84,6 +85,10 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
// layers in the resulting configuration isn't guaranteed to be returned due to how
// old image manifests work (docker v2s1 especially).
func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
+	if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+		return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
+	}
+
	cb, err := m.ConfigBlob(ctx)
	if err != nil {
		return nil, err
@@ -194,10 +199,15 @@ func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, opti
// value.
// This does not change the state of the original manifestOCI1 object.
func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.ManifestUpdateOptions) (*manifestSchema2, error) {
+	if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+		return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
+	}
+
	// Create a copy of the descriptor.
	config := schema2DescriptorFromOCI1Descriptor(m.m.Config)

-	// The only difference between OCI and DockerSchema2 is the mediatypes. The
+	// Above, we have already checked that this manifest refers to an image, not an OCI artifact,
+	// so the only difference between OCI and DockerSchema2 is the mediatypes. The
	// media type of the manifest is handled by manifestSchema2FromComponents.
	config.MediaType = manifest.DockerV2Schema2ConfigMediaType
@@ -233,7 +243,11 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.Mani
// value.
// This does not change the state of the original manifestOCI1 object.
func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
-	// We can't directly convert to V1, but we can transitively convert via a V2 image
+	if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+		return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
+	}
+
+	// We can't directly convert images to V1, but we can transitively convert via a V2 image
	m2, err := m.convertToManifestSchema2(ctx, options)
	if err != nil {
		return nil, err
@@ -246,3 +260,12 @@ func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *ty
func (m *manifestOCI1) SupportsEncryption(context.Context) bool {
	return true
}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+// to a different manifest format).
+func (m *manifestOCI1) CanChangeLayerCompression(mimeType string) bool {
+	return m.m.CanChangeLayerCompression(mimeType)
+}
View File
@@ -0,0 +1,134 @@
// Package image consolidates knowledge about various container image formats
// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
// and exposes all of them using a unified interface.
package image
import (
"context"
"github.com/containers/image/v5/types"
)
// FromReference returns a types.ImageCloser implementation for the default instance reading from reference.
// If reference points to a manifest list, .Manifest() still returns the manifest list,
// but other methods transparently return data from an appropriate image instance.
//
// The caller must call .Close() on the returned ImageCloser.
//
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
func FromReference(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (types.ImageCloser, error) {
src, err := ref.NewImageSource(ctx, sys)
if err != nil {
return nil, err
}
img, err := FromSource(ctx, sys, src)
if err != nil {
src.Close()
return nil, err
}
return img, nil
}
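
A minimal usage sketch, assuming this file's package and imports (the package is internal, so only c/image itself can call it): the one obligation FromReference places on its caller is the Close().

func manifestOf(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) ([]byte, string, error) {
	img, err := FromReference(ctx, sys, ref)
	if err != nil {
		return nil, "", err
	}
	defer img.Close() // closes the ImageSource that FromReference opened
	return img.Manifest(ctx)
}
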
// imageCloser implements types.ImageCloser, perhaps allowing simple users
// to use a single object without having to keep a reference to a types.ImageSource
// only to call types.ImageSource.Close().
type imageCloser struct {
types.Image
src types.ImageSource
}
// FromSource returns a types.ImageCloser implementation for the default instance of source.
// If source is a manifest list, .Manifest() still returns the manifest list,
// but other methods transparently return data from an appropriate image instance.
//
// The caller must call .Close() on the returned ImageCloser.
//
// FromSource “takes ownership” of the input ImageSource and will call src.Close()
// when the image is closed. (This does not prevent callers from using both the
// Image and ImageSource objects simultaneously, but it means that they only need to
// close the Image.)
//
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
//
// Most callers can use either FromUnparsedImage or FromReference instead.
//
// This is publicly visible as c/image/image.FromSource.
func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil))
if err != nil {
return nil, err
}
return &imageCloser{
Image: img,
src: src,
}, nil
}
func (ic *imageCloser) Close() error {
return ic.src.Close()
}
// SourcedImage is a general set of utilities for working with container images,
// whatever their underlying transport may be (i.e. ImageSource-independent).
// Note the existence of docker.Image and image.memoryImage: various instances
// of a types.Image may not be a SourcedImage directly.
//
// Most external users of `types.Image` do not care, and those who care about `docker.Image` know they do.
//
// Internal users may depend on methods available in SourcedImage but not (yet?) in types.Image.
type SourcedImage struct {
*UnparsedImage
ManifestBlob []byte // The manifest of the relevant instance
ManifestMIMEType string // MIME type of ManifestBlob
// genericManifest contains data corresponding to ManifestBlob.
// NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest
// if you want to preserve the original manifest; use ManifestBlob directly.
genericManifest
}
// FromUnparsedImage returns a types.Image implementation for unparsed.
// If unparsed represents a manifest list, .Manifest() still returns the manifest list,
// but other methods transparently return data from an appropriate single image.
//
// The Image must not be used after the underlying ImageSource is Close()d.
//
// This is publicly visible as c/image/image.FromUnparsedImage.
func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (*SourcedImage, error) {
// Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage:
// we want to be able to use unparsed.src. We could make that an explicit interface, but, well,
// this is the only UnparsedImage implementation around, anyway.
// NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest().
manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx)
if err != nil {
return nil, err
}
parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType)
if err != nil {
return nil, err
}
return &SourcedImage{
UnparsedImage: unparsed,
ManifestBlob: manifestBlob,
ManifestMIMEType: manifestMIMEType,
genericManifest: parsedManifest,
}, nil
}
// Size returns the size of the image as stored, if it's known, or -1 if it isn't.
func (i *SourcedImage) Size() (int64, error) {
return -1, nil
}
// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
func (i *SourcedImage) Manifest(ctx context.Context) ([]byte, string, error) {
return i.ManifestBlob, i.ManifestMIMEType, nil
}
func (i *SourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest)
}
View File
@@ -0,0 +1,99 @@
package image
import (
"context"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// UnparsedImage implements types.UnparsedImage.
// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
//
// This is publicly visible as c/image/image.UnparsedImage.
type UnparsedImage struct {
src types.ImageSource
instanceDigest *digest.Digest
cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
// A private cache for Manifest(), may be the empty string if guessing failed.
// Valid iff cachedManifest is not nil.
cachedManifestMIMEType string
cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known.
}
// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
//
// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
//
// This is publicly visible as c/image/image.UnparsedInstance.
func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
return &UnparsedImage{
src: src,
instanceDigest: instanceDigest,
}
}
// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
func (i *UnparsedImage) Reference() types.ImageReference {
// Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
return i.src.Reference()
}
// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
if i.cachedManifest == nil {
m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
if err != nil {
return nil, "", err
}
// ImageSource.GetManifest does not do digest verification, but we do;
// this immediately protects also any user of types.Image.
if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
matches, err := manifest.MatchesDigest(m, digest)
if err != nil {
return nil, "", errors.Wrap(err, "computing manifest digest")
}
if !matches {
return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
}
}
i.cachedManifest = m
i.cachedManifestMIMEType = mt
}
return i.cachedManifest, i.cachedManifestMIMEType, nil
}
// expectedManifestDigest returns the expected value of the manifest digest, and an indicator whether it is known.
// The bool return value seems redundant with digest != ""; it is used explicitly
// to refuse (unexpected) situations when the digest exists but is "".
func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) {
if i.instanceDigest != nil {
return *i.instanceDigest, true
}
ref := i.Reference().DockerReference()
if ref != nil {
if canonical, ok := ref.(reference.Canonical); ok {
return canonical.Digest(), true
}
}
return "", false
}
// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
if i.cachedSignatures == nil {
sigs, err := i.src.GetSignatures(ctx, i.instanceDigest)
if err != nil {
return nil, err
}
i.cachedSignatures = sigs
}
return i.cachedSignatures, nil
}
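
A short in-package sketch of the digest-pinning behavior above (d is whatever digest the caller already trusts, e.g. from a canonical reference):

func manifestPinned(ctx context.Context, src types.ImageSource, d digest.Digest) ([]byte, string, error) {
	// With a non-nil instance digest, Manifest() fails unless the fetched bytes match d.
	unparsed := UnparsedInstance(src, &d)
	return unparsed.Manifest(ctx)
}
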
View File
@@ -0,0 +1,32 @@
package manifest
import "fmt"
// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact).
//
// This is publicly visible as c/image/manifest.NonImageArtifactError (but we don't provide a public constructor).
type NonImageArtifactError struct {
// Callers should not be blindly calling image-specific operations and only checking MIME types
// on failure; if they care about the artifact type, they should check before using it.
// If they blindly assume an image, they don't really need this value; just a type check
// is sufficient for basic "we can only pull images" UI.
//
// Also, there are fairly widespread “artifacts” which nevertheless use imgspecv1.MediaTypeImageConfig,
// e.g. https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md , which could cause the callers
// to complain about a non-image artifact with the correct MIME type; we should probably add some other kind of
// type discrimination, _and_ somehow make it available in the API, if we expect API callers to make decisions
// based on that kind of data.
//
// So, let's not expose this until a specific need is identified.
mimeType string
}
// NewNonImageArtifactError returns a NonImageArtifactError about an artifact with mimeType.
func NewNonImageArtifactError(mimeType string) error {
return NonImageArtifactError{mimeType: mimeType}
}
func (e NonImageArtifactError) Error() string {
return fmt.Sprintf("unsupported image-specific operation on artifact with type %q", e.mimeType)
}
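
A caller-side detection sketch (the helper is hypothetical; it uses the public alias added to the manifest package later in this commit): errors.As distinguishes "this is an artifact, not an image" from genuine failures.

package main

import (
	"context"
	"errors"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
)

// inspectIfImage returns ok=false with a nil error when img is an OCI artifact
// rather than a container image, and propagates all other failures.
func inspectIfImage(ctx context.Context, img types.Image) (*types.ImageInspectInfo, bool, error) {
	info, err := img.Inspect(ctx)
	var artifactErr manifest.NonImageArtifactError
	if errors.As(err, &artifactErr) {
		return nil, false, nil // not an image; the caller may handle the artifact differently
	}
	if err != nil {
		return nil, false, err
	}
	return info, true, nil
}
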
View File
@@ -118,6 +118,18 @@ type compressionMIMETypeSet map[string]string
const mtsUncompressed = ""        // A key in compressionMIMETypeSet for the uncompressed variant
const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that means “recognized but unsupported”

+// findCompressionMIMETypeSet returns the compressionMIMETypeSet in variantTable that contains mimeType as a value, or nil if not found.
+func findCompressionMIMETypeSet(variantTable []compressionMIMETypeSet, mimeType string) compressionMIMETypeSet {
+	for _, variants := range variantTable {
+		for _, mt := range variants {
+			if mt == mimeType {
+				return variants
+			}
+		}
+	}
+	return nil
+}
+
// compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil
// to mean "no compression"), based on variantTable.
// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants
@@ -130,9 +142,8 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
	if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
		return "", fmt.Errorf("cannot update unknown MIME type")
	}
-	for _, variants := range variantTable {
-		for _, mt := range variants {
-			if mt == mimeType { // Found the variant
+	variants := findCompressionMIMETypeSet(variantTable, mimeType)
+	if variants != nil {
		name := mtsUncompressed
		if algorithm != nil {
			name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark()
@@ -142,17 +153,15 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
			return res, nil
		}
		if name != mtsUncompressed {
-			return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mt)}
+			return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mimeType)}
		}
-		return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)}
+		return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
		}
		if name != mtsUncompressed {
-			return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mt)}
+			return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mimeType)}
		}
		// We can't very well say “the idea of no compression is unknown”
-		return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)}
+		return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
-		}
-	}
	}
@@ -209,3 +218,13 @@ type ManifestLayerCompressionIncompatibilityError struct {
func (m ManifestLayerCompressionIncompatibilityError) Error() string {
	return m.text
}
+
+// compressionVariantsRecognizeMIMEType returns true if variantTable contains data about compressing/decompressing layers with mimeType.
+// Note that the caller still needs to worry about a specific algorithm not being supported.
+func compressionVariantsRecognizeMIMEType(variantTable []compressionMIMETypeSet, mimeType string) bool {
+	if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
+		return false
+	}
+	variants := findCompressionMIMETypeSet(variantTable, mimeType)
+	return variants != nil // Alternatively, this could be len(variants) > 1, but really the caller should ask about a specific algorithm.
+}
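
For orientation, an illustrative in-package table entry in the shape these helpers walk. This is a hypothetical example, not one of the real schema2/OCI tables: one compressionMIMETypeSet per layer kind, keyed by algorithm name, with mtsUncompressed for the uncompressed variant and mtsUnsupportedMIMEType for "recognized but unsupported".

var exampleVariantTable = []compressionMIMETypeSet{
	{
		mtsUncompressed: "application/vnd.example.layer.tar",      // no compression
		"gzip":          "application/vnd.example.layer.tar+gzip", // supported variant
		"zstd":          mtsUnsupportedMIMEType,                   // recognized but unsupported
	},
}
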
View File
@@ -295,3 +295,11 @@ func (m *Schema2) ImageID([]digest.Digest) (string, error) {
	}
	return m.ConfigDescriptor.Digest.Hex(), nil
}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion.
+func (m *Schema2) CanChangeLayerCompression(mimeType string) bool {
+	return compressionVariantsRecognizeMIMEType(schema2CompressionMIMETypeSets, mimeType)
+}
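
A hedged caller-side sketch of the intended use (the helper is hypothetical, in the same package): consult CanChangeLayerCompression for every layer before planning a compression edit, instead of relying on UpdateLayerInfos failing later.

func canRecompressAllLayers(m *Schema2) bool {
	for _, l := range m.LayerInfos() {
		if !m.CanChangeLayerCompression(l.MediaType) {
			return false
		}
	}
	return true
}
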
View File
@@ -4,6 +4,7 @@ import (
	"encoding/json"
	"fmt"
+	internalManifest "github.com/containers/image/v5/internal/manifest"
	"github.com/containers/image/v5/types"
	"github.com/containers/libtrust"
	digest "github.com/opencontainers/go-digest"
@@ -34,6 +35,10 @@ const (
	DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
)

+// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
+// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact).
+type NonImageArtifactError = internalManifest.NonImageArtifactError
+
// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type.
func SupportedSchema2MediaType(m string) error {
	switch m {
View File
@@ -5,6 +5,7 @@ import (
	"fmt"
	"strings"
+	internalManifest "github.com/containers/image/v5/internal/manifest"
	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
	"github.com/containers/image/v5/types"
	ociencspec "github.com/containers/ocicrypt/spec"
@@ -115,6 +116,12 @@ var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers)
// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
// CompressionAlgorithm that isn't supported by OCI.
+//
+// It's generally the caller's responsibility to determine whether a particular edit is acceptable, rather than relying on
+// failures of this function, because the layer is typically created _before_ UpdateLayerInfos is called (because UpdateLayerInfos needs
+// to know the final digest). See OCI1.CanChangeLayerCompression for some help in determining this; other aspects like compression
+// algorithms that might not be supported by a format, or the limited set of MIME types accepted for encryption, are not currently
+// handled — that logic should eventually also be provided as OCI1 methods, not hard-coded in callers.
func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
	if len(m.Layers) != len(layerInfos) {
		return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
@@ -151,6 +158,33 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
	return nil
}
+// getEncryptedMediaType returns the encrypted counterpart of mediatype, or
+// an error if the mediatype does not support encryption.
+func getEncryptedMediaType(mediatype string) (string, error) {
+	for _, s := range strings.Split(mediatype, "+")[1:] {
+		if s == "encrypted" {
+			return "", errors.Errorf("unsupported mediatype: %v already encrypted", mediatype)
+		}
+	}
+	unsuffixedMediatype := strings.Split(mediatype, "+")[0]
+	switch unsuffixedMediatype {
+	case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable:
+		return mediatype + "+encrypted", nil
+	}
+	return "", errors.Errorf("unsupported mediatype to encrypt: %v", mediatype)
+}
+
+// getDecryptedMediaType returns the decrypted counterpart of mediatype, or
+// an error if the mediatype does not support decryption.
+func getDecryptedMediaType(mediatype string) (string, error) {
+	if !strings.HasSuffix(mediatype, "+encrypted") {
+		return "", errors.Errorf("unsupported mediatype to decrypt: %v", mediatype)
+	}
+	return strings.TrimSuffix(mediatype, "+encrypted"), nil
+}
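
The two helpers are inverses for the supported layer types; an illustrative in-package test (not part of this commit, assuming this package's imports plus testing):

func TestMediaTypeEncryptionRoundTrip(t *testing.T) {
	enc, err := getEncryptedMediaType(imgspecv1.MediaTypeImageLayerGzip)
	if err != nil {
		t.Fatal(err)
	}
	// "application/vnd.oci.image.layer.v1.tar+gzip" -> "...tar+gzip+encrypted"
	dec, err := getDecryptedMediaType(enc)
	if err != nil {
		t.Fatal(err)
	}
	if dec != imgspecv1.MediaTypeImageLayerGzip {
		t.Fatalf("round-trip mismatch: got %q", dec)
	}
}
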
// Serialize returns the manifest in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (m *OCI1) Serialize() ([]byte, error) {
@@ -159,6 +193,14 @@ func (m *OCI1) Serialize() ([]byte, error) {
// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+	if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+		// We could return at least the layers, but that's already available in a better format via types.Image.LayerInfos.
+		// Most software calling this without human intervention is going to expect the values to be realistic and relevant,
+		// and is probably better served by failing; we can always re-visit that later if we fail now, but
+		// if we started returning some data for OCI artifacts now, we couldn't start failing in this function later.
+		return nil, internalManifest.NewNonImageArtifactError(m.Config.MediaType)
+	}
+
	config, err := configGetter(m.ConfigInfo())
	if err != nil {
		return nil, err
@@ -186,35 +228,39 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
// ImageID computes an ID which can uniquely identify this image by its contents.
func (m *OCI1) ImageID([]digest.Digest) (string, error) {
+	// The way m.Config.Digest “uniquely identifies” an image is
+	// by containing RootFS.DiffIDs, which identify the layers of the image.
+	// For non-image artifacts, we can't expect the config to change
+	// any time the other layers (semantically) change, so this approach of
+	// distinguishing objects only by m.Config.Digest doesn't work in general.
+	//
+	// Any caller of this method presumably wants to disambiguate the same
+	// images with a different representation, but doesn't want to disambiguate
+	// representations (by using a manifest digest). So, submitting a non-image
+	// artifact to such a caller indicates an expectation mismatch.
+	// So, we just fail here instead of inventing some other ID value (e.g.
+	// by combining the config and blob layer digests). That still
+	// gives us the option to not fail, and return some value, in the future,
+	// without committing to that approach now.
+	// (The only known caller of ImageID is storage/storageImageDestination.computeID,
+	// which can't work with non-image artifacts.)
+	if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+		return "", internalManifest.NewNonImageArtifactError(m.Config.MediaType)
+	}
+
	if err := m.Config.Digest.Validate(); err != nil {
		return "", err
	}
	return m.Config.Digest.Hex(), nil
}
-// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
-// an error if the mediatype does not support encryption
-func getEncryptedMediaType(mediatype string) (string, error) {
-	for _, s := range strings.Split(mediatype, "+")[1:] {
-		if s == "encrypted" {
-			return "", errors.Errorf("unsupportedmediatype: %v already encrypted", mediatype)
-		}
-	}
-	unsuffixedMediatype := strings.Split(mediatype, "+")[0]
-	switch unsuffixedMediatype {
-	case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable:
-		return mediatype + "+encrypted", nil
-	}
-	return "", errors.Errorf("unsupported mediatype to encrypt: %v", mediatype)
-}
-
-// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
-// an error if the mediatype does not support decryption
-func getDecryptedMediaType(mediatype string) (string, error) {
-	if !strings.HasSuffix(mediatype, "+encrypted") {
-		return "", errors.Errorf("unsupported mediatype to decrypt %v:", mediatype)
-	}
-	return strings.TrimSuffix(mediatype, "+encrypted"), nil
-}
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion.
+func (m *OCI1) CanChangeLayerCompression(mimeType string) bool {
+	if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+		return false
+	}
+	return compressionVariantsRecognizeMIMEType(oci1CompressionMIMETypeSets, mimeType)
+}
View File
@@ -8,7 +8,7 @@ import (
	"github.com/containers/image/v5/directory/explicitfilepath"
	"github.com/containers/image/v5/docker/reference"
-	"github.com/containers/image/v5/image"
+	"github.com/containers/image/v5/internal/image"
	"github.com/containers/image/v5/internal/tmpdir"
	"github.com/containers/image/v5/oci/internal"
	ocilayout "github.com/containers/image/v5/oci/layout"
@@ -122,11 +122,7 @@ func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref ociArchiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
-	src, err := newImageSource(ctx, sys, ref)
-	if err != nil {
-		return nil, err
-	}
-	return image.FromSource(ctx, sys, src)
+	return image.FromReference(ctx, sys, ref)
}

// NewImageSource returns a types.ImageSource for this reference.
View File
@@ -10,7 +10,7 @@ import (
	"github.com/containers/image/v5/directory/explicitfilepath"
	"github.com/containers/image/v5/docker/reference"
-	"github.com/containers/image/v5/image"
+	"github.com/containers/image/v5/internal/image"
	"github.com/containers/image/v5/oci/internal"
	"github.com/containers/image/v5/transports"
	"github.com/containers/image/v5/types"
@@ -154,11 +154,7 @@ func (ref ociReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
-	src, err := newImageSource(sys, ref)
-	if err != nil {
-		return nil, err
-	}
-	return image.FromSource(ctx, sys, src)
+	return image.FromReference(ctx, sys, ref)
}

// getIndex returns a pointer to the index referenced by this ociReference. If an error occurs opening an index nil is returned together
View File
@@ -8,7 +8,7 @@ import (
	"github.com/containers/image/v5/docker/policyconfiguration"
	"github.com/containers/image/v5/docker/reference"
-	genericImage "github.com/containers/image/v5/image"
+	genericImage "github.com/containers/image/v5/internal/image"
	"github.com/containers/image/v5/transports"
	"github.com/containers/image/v5/types"
	"github.com/pkg/errors"
@@ -132,11 +132,7 @@ func (ref openshiftReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref openshiftReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
-	src, err := newImageSource(sys, ref)
-	if err != nil {
-		return nil, err
-	}
-	return genericImage.FromSource(ctx, sys, src)
+	return genericImage.FromReference(ctx, sys, ref)
}

// NewImageSource returns a types.ImageSource for this reference.

View File

@ -14,7 +14,7 @@ import (
"github.com/containers/image/v5/directory/explicitfilepath" "github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image" "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports" "github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types" "github.com/containers/image/v5/types"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -184,17 +184,7 @@ func (s *ostreeImageCloser) Size() (int64, error) {
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
var tmpDir string return image.FromReference(ctx, sys, ref)
if sys == nil || sys.OSTreeTmpDirPath == "" {
tmpDir = os.TempDir()
} else {
tmpDir = sys.OSTreeTmpDirPath
}
src, err := newImageSource(tmpDir, ref)
if err != nil {
return nil, err
}
return image.FromSource(ctx, sys, src)
} }
// NewImageSource returns a types.ImageSource for this reference. // NewImageSource returns a types.ImageSource for this reference.

View File

@ -9,7 +9,7 @@ import (
"github.com/containers/image/v5/directory/explicitfilepath" "github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image" "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports" "github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types" "github.com/containers/image/v5/types"
) )
@ -139,11 +139,7 @@ func (ref sifReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
src, err := newImageSource(ctx, sys, ref) return image.FromReference(ctx, sys, ref)
if err != nil {
return nil, err
}
return image.FromSource(ctx, sys, src)
} }
// NewImageSource returns a types.ImageSource for this reference. // NewImageSource returns a types.ImageSource for this reference.

View File

@ -16,7 +16,7 @@ import (
"sync/atomic" "sync/atomic"
"github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image" "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest" "github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/tmpdir" "github.com/containers/image/v5/internal/tmpdir"
@ -486,7 +486,7 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
} }
// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file. // putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
// The caller must arrange the blob to be eventually commited using s.commitLayer(). // The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) { func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) {
// Stores a layer or data blob in our temporary directory, checking that any information // Stores a layer or data blob in our temporary directory, checking that any information
// in the blobinfo matches the incoming data. // in the blobinfo matches the incoming data.
@ -641,7 +641,7 @@ func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo t
} }
// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata. // tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata.
// The caller must arrange the blob to be eventually commited using s.commitLayer(). // The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
// lock the entire method as it executes fairly quickly // lock the entire method as it executes fairly quickly
s.lock.Lock() s.lock.Lock()
@ -941,7 +941,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
s.lock.Unlock() s.lock.Unlock()
if ok { if ok {
layer, err := al.PutAs(id, lastLayer, nil) layer, err := al.PutAs(id, lastLayer, nil)
if err != nil { if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
return errors.Wrapf(err, "failed to put layer from digest and labels") return errors.Wrapf(err, "failed to put layer from digest and labels")
} }
lastLayer = layer.ID lastLayer = layer.ID
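The commitLayer hunk above makes the additional-layer path tolerate storage.ErrDuplicateID, since the same layer may already have been committed by a concurrent pull. A small runnable illustration of the errors.Cause sentinel comparison used here (github.com/pkg/errors is already imported by this file); errDuplicateID and putLayer are stand-ins:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// errDuplicateID stands in for storage.ErrDuplicateID.
var errDuplicateID = errors.New("duplicate ID")

// putLayer stands in for al.PutAs reporting an already-present layer.
func putLayer(id string) error {
	return errors.Wrapf(errDuplicateID, "layer %q", id)
}

func main() {
	err := putLayer("abc")
	// errors.Cause unwraps pkg/errors wrappers so the sentinel can be
	// compared directly; an already-committed layer is not a failure.
	if err != nil && errors.Cause(err) != errDuplicateID {
		fmt.Println("fatal:", err)
		return
	}
	fmt.Println("layer already present, continuing")
}
```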

View File

@ -7,7 +7,7 @@ import (
"strings" "strings"
"github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image" "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types" "github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@ -67,16 +67,7 @@ func (r *tarballReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
src, err := r.NewImageSource(ctx, sys) return image.FromReference(ctx, sys, r)
if err != nil {
return nil, err
}
img, err := image.FromSource(ctx, sys, src)
if err != nil {
src.Close()
return nil, err
}
return img, nil
} }
func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {

View File

@ -8,8 +8,7 @@
[![Coverage Status][9]][10] [![Coverage Status][9]][10]
[![Sourcegraph][11]][12] [![Sourcegraph][11]][12]
[![FOSSA Status][13]][14] [![FOSSA Status][13]][14]
[![Become my sponsor][15]][16]
[![GoCenter Kudos][15]][16]
[1]: https://travis-ci.org/imdario/mergo.png [1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo [2]: https://travis-ci.org/imdario/mergo
@ -25,8 +24,8 @@
[12]: https://sourcegraph.com/github.com/imdario/mergo?badge [12]: https://sourcegraph.com/github.com/imdario/mergo?badge
[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield [13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield [14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo [15]: https://img.shields.io/github/sponsors/imdario
[16]: https://search.gocenter.io/github.com/imdario/mergo [16]: https://github.com/sponsors/imdario
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
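For context on the README being edited here, a minimal usage sketch of the library's core call; the config struct and its fields are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type ServerConfig struct {
	Host string
	Port int
}

func main() {
	defaults := ServerConfig{Host: "localhost", Port: 8080}
	cfg := ServerConfig{Host: "example.com"} // Port deliberately left zero

	// Merge fills only the zero-valued fields of cfg from defaults.
	if err := mergo.Merge(&cfg, defaults); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Host:example.com Port:8080}
}
```

By default only zero-valued destination fields are filled; mergo.WithOverride flips that so the source wins.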
@ -36,11 +35,11 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the
## Status ## Status
It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
### Important note ### Important note
Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules. Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
@ -51,12 +50,12 @@ If you were using Mergo before April 6th, 2015, please check your project works
If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a> <a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo)
[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo)
<a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a> <a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a>
<a href='https://github.com/sponsors/imdario' target='_blank'><img alt="Become my sponsor" src="https://img.shields.io/github/sponsors/imdario?style=for-the-badge" /></a>
### Mergo in the wild ### Mergo in the wild
- [cli/cli](https://github.com/cli/cli)
- [moby/moby](https://github.com/moby/moby) - [moby/moby](https://github.com/moby/moby)
- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
- [vmware/dispatch](https://github.com/vmware/dispatch) - [vmware/dispatch](https://github.com/vmware/dispatch)
@ -98,6 +97,8 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont
- [jnuthong/item_search](https://github.com/jnuthong/item_search) - [jnuthong/item_search](https://github.com/jnuthong/item_search)
- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
- [containerssh/containerssh](https://github.com/containerssh/containerssh) - [containerssh/containerssh](https://github.com/containerssh/containerssh)
- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
- [tjpnz/structbot](https://github.com/tjpnz/structbot)
## Install ## Install
@ -168,7 +169,7 @@ func main() {
Note: if tests are failing due to a missing package, please execute: Note: if tests are failing due to a missing package, please execute:
go get gopkg.in/yaml.v2 go get gopkg.in/yaml.v3
### Transformers ### Transformers
@ -218,7 +219,6 @@ func main() {
} }
``` ```
## Contact me ## Contact me
If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
@ -227,18 +227,6 @@ If I can help you, you have an idea or you are using Mergo in your projects, don
Written by [Dario Castañé](http://dario.im). Written by [Dario Castañé](http://dario.im).
## Top Contributors
[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0)
[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1)
[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2)
[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3)
[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4)
[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5)
[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6)
[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7)
## License ## License
[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).

View File

@ -79,7 +79,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
visited[h] = &visit{addr, typ, seen} visited[h] = &visit{addr, typ, seen}
} }
if config.Transformers != nil && !isEmptyValue(dst) { if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
if fn := config.Transformers.Transformer(dst.Type()); fn != nil { if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
err = fn(dst, src) err = fn(dst, src)
return return
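The guard change above stops short-circuiting transformers on merely empty destinations: they are now skipped only when dst is invalid or is a nil-able kind currently holding nil, which is what isReflectNil checks. A sketch in the spirit of that helper (the actual mergo implementation may differ):

```go
package main

import (
	"fmt"
	"reflect"
)

// isReflectNil reports whether v is a nil-able kind currently holding nil.
// Calling IsNil on any other kind would panic, hence the kind switch.
func isReflectNil(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.Chan, reflect.Func:
		return v.IsNil()
	default:
		return false
	}
}

func main() {
	var m map[string]int
	fmt.Println(isReflectNil(reflect.ValueOf(m)))  // true: nil map
	fmt.Println(isReflectNil(reflect.ValueOf(42))) // false: an int is never nil
}
```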

View File

@ -17,7 +17,7 @@ import (
var ( var (
ErrNilArguments = errors.New("src and dst must not be nil") ErrNilArguments = errors.New("src and dst must not be nil")
ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
ErrNotSupported = errors.New("only structs and maps are supported") ErrNotSupported = errors.New("only structs, maps, and slices are supported")
ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
ErrNonPointerAgument = errors.New("dst must be a pointer") ErrNonPointerAgument = errors.New("dst must be a pointer")
@ -65,7 +65,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
return return
} }
vDst = reflect.ValueOf(dst).Elem() vDst = reflect.ValueOf(dst).Elem()
if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice {
err = ErrNotSupported err = ErrNotSupported
return return
} }

View File

@ -17,6 +17,24 @@ This package provides various compression algorithms.
# changelog # changelog
* May 25, 2022 (v1.15.5)
* s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602
* s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601
* huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596
* zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588
* zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592
* zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
* zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
* huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
* flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
* May 11, 2022 (v1.15.4)
* huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
* inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
* zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
* zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
* May 5, 2022 (v1.15.3) * May 5, 2022 (v1.15.3)
* zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
* s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
@ -77,6 +95,9 @@ While the release has been extensively tested, it is recommended to testing when
* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
<details>
<summary>See changes to v1.13.x</summary>
* Aug 30, 2021 (v1.13.5) * Aug 30, 2021 (v1.13.5)
* gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
* s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
@ -105,6 +126,8 @@ While the release has been extensively tested, it is recommended to testing when
* Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors.
* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
</details>
<details> <details>
<summary>See changes to v1.12.x</summary> <summary>See changes to v1.12.x</summary>

View File

@ -84,24 +84,23 @@ type advancedState struct {
length int length int
offset int offset int
maxInsertIndex int maxInsertIndex int
// Input hash chains
// hashHead[hashValue] contains the largest inputIndex with the specified hash value
// If hashHead[hashValue] is within the current window, then
// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
// with the same hash value.
chainHead int chainHead int
hashHead [hashSize]uint32
hashPrev [windowSize]uint32
hashOffset int hashOffset int
ii uint16 // position of last match, intended to overflow to reset.
// input window: unprocessed data is window[index:windowEnd] // input window: unprocessed data is window[index:windowEnd]
index int index int
estBitsPerByte int estBitsPerByte int
hashMatch [maxMatchLength + minMatchLength]uint32 hashMatch [maxMatchLength + minMatchLength]uint32
hash uint32 // Input hash chains
ii uint16 // position of last match, intended to overflow to reset. // hashHead[hashValue] contains the largest inputIndex with the specified hash value
// If hashHead[hashValue] is within the current window, then
// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
// with the same hash value.
hashHead [hashSize]uint32
hashPrev [windowSize]uint32
} }
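The relocated comment block describes deflate's hash-chain index: hashHead maps a hash value to the newest input position, and hashPrev threads older positions with the same hash back through the window. A runnable toy walk of such a chain, with simplified names and none of the package's real tuning:

```go
package main

import "fmt"

const (
	windowSize = 1 << 15
	windowMask = windowSize - 1
)

// matchCandidates walks a deflate-style hash chain: head holds the newest
// position (offset by hashOffset so zero means "empty") for one hash value,
// and prev links each windowed position to the previous one with that hash.
func matchCandidates(head uint32, prev []uint32, hashOffset, minIndex, maxChain int) []int {
	var out []int
	pos := int(head) - hashOffset
	for i := 0; i < maxChain && pos > minIndex; i++ {
		out = append(out, pos)
		pos = int(prev[pos&windowMask]) - hashOffset
	}
	return out
}

func main() {
	prev := make([]uint32, windowSize)
	hashOffset := 1
	// Positions 40 and 100 hashed to the same value: head points at 100,
	// and prev threads 100 back to 40.
	prev[100&windowMask] = uint32(40 + hashOffset)
	head := uint32(100 + hashOffset)
	fmt.Println(matchCandidates(head, prev, hashOffset, -1, 8)) // [100 40]
}
```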
type compressor struct { type compressor struct {
@ -259,7 +258,6 @@ func (d *compressor) fillWindow(b []byte) {
// Set the head of the hash chain to us. // Set the head of the hash chain to us.
s.hashHead[newH] = uint32(di + s.hashOffset) s.hashHead[newH] = uint32(di + s.hashOffset)
} }
s.hash = newH
} }
// Update window information. // Update window information.
d.windowEnd += n d.windowEnd += n
@ -403,7 +401,6 @@ func (d *compressor) initDeflate() {
s.hashOffset = 1 s.hashOffset = 1
s.length = minMatchLength - 1 s.length = minMatchLength - 1
s.offset = 0 s.offset = 0
s.hash = 0
s.chainHead = -1 s.chainHead = -1
} }
@ -432,9 +429,6 @@ func (d *compressor) deflateLazy() {
} }
s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
if s.index < s.maxInsertIndex {
s.hash = hash4(d.window[s.index:])
}
for { for {
if sanity && s.index > d.windowEnd { if sanity && s.index > d.windowEnd {
@ -466,11 +460,11 @@ func (d *compressor) deflateLazy() {
} }
if s.index < s.maxInsertIndex { if s.index < s.maxInsertIndex {
// Update the hash // Update the hash
s.hash = hash4(d.window[s.index:]) hash := hash4(d.window[s.index:])
ch := s.hashHead[s.hash&hashMask] ch := s.hashHead[hash]
s.chainHead = int(ch) s.chainHead = int(ch)
s.hashPrev[s.index&windowMask] = ch s.hashPrev[s.index&windowMask] = ch
s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset) s.hashHead[hash] = uint32(s.index + s.hashOffset)
} }
prevLength := s.length prevLength := s.length
prevOffset := s.offset prevOffset := s.offset
@ -503,7 +497,7 @@ func (d *compressor) deflateLazy() {
end += prevIndex end += prevIndex
idx := prevIndex + prevLength - (4 - checkOff) idx := prevIndex + prevLength - (4 - checkOff)
h := hash4(d.window[idx:]) h := hash4(d.window[idx:])
ch2 := int(s.hashHead[h&hashMask]) - s.hashOffset - prevLength + (4 - checkOff) ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + (4 - checkOff)
if ch2 > minIndex { if ch2 > minIndex {
length := matchLen(d.window[prevIndex:end], d.window[ch2:]) length := matchLen(d.window[prevIndex:end], d.window[ch2:])
// It seems like a pure length metric is best. // It seems like a pure length metric is best.
@ -547,7 +541,6 @@ func (d *compressor) deflateLazy() {
// Set the head of the hash chain to us. // Set the head of the hash chain to us.
s.hashHead[newH] = uint32(di + s.hashOffset) s.hashHead[newH] = uint32(di + s.hashOffset)
} }
s.hash = newH
} }
s.index = newIndex s.index = newIndex
@ -793,7 +786,6 @@ func (d *compressor) reset(w io.Writer) {
d.tokens.Reset() d.tokens.Reset()
s.length = minMatchLength - 1 s.length = minMatchLength - 1
s.offset = 0 s.offset = 0
s.hash = 0
s.ii = 0 s.ii = 0
s.maxInsertIndex = 0 s.maxInsertIndex = 0
} }

View File

@ -117,7 +117,7 @@ func (e *fastGen) addBlock(src []byte) int32 {
// hash4 returns the hash of u to fit in a hash table with h bits. // hash4 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32. // Preferably h should be a constant and should always be <32.
func hash4u(u uint32, h uint8) uint32 { func hash4u(u uint32, h uint8) uint32 {
return (u * prime4bytes) >> ((32 - h) & reg8SizeMask32) return (u * prime4bytes) >> (32 - h)
} }
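hash4u is plain multiplicative hashing: multiply by a large odd constant so the high bits depend on every input bit, then keep the top h bits as the bucket index. The dropped shift mask was redundant because h is always below 32 here. A standalone illustration; the constant is assumed to match the package's prime4bytes and should be treated as illustrative:

```go
package main

import "fmt"

// Assumed to match the package's prime4bytes; treat the value as illustrative.
const prime4bytes = 2654435761

// hash4u keeps the top h bits of u*prime as the bucket index; the multiply
// spreads every input bit into the high bits. Only valid for h < 32.
func hash4u(u uint32, h uint8) uint32 {
	return (u * prime4bytes) >> (32 - h)
}

func main() {
	fmt.Println(hash4u(0x12345678, 15)) // a bucket in [0, 1<<15)
}
```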
type tableEntryPrev struct { type tableEntryPrev struct {

View File

@ -165,11 +165,6 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
return uint16(b.value >> ((64 - n) & 63)) return uint16(b.value >> ((64 - n) & 63))
} }
// peekTopBits(n) is equvialent to peekBitFast(64 - n)
func (b *bitReaderShifted) peekTopBits(n uint8) uint16 {
return uint16(b.value >> n)
}
func (b *bitReaderShifted) advance(n uint8) { func (b *bitReaderShifted) advance(n uint8) {
b.bitsRead += n b.bitsRead += n
b.value <<= n & 63 b.value <<= n & 63
@ -220,11 +215,6 @@ func (b *bitReaderShifted) fill() {
} }
} }
// finished returns true if all bits have been read from the bit stream.
func (b *bitReaderShifted) finished() bool {
return b.off == 0 && b.bitsRead >= 64
}
func (b *bitReaderShifted) remaining() uint { func (b *bitReaderShifted) remaining() uint {
return b.off*8 + uint(64-b.bitsRead) return b.off*8 + uint(64-b.bitsRead)
} }

View File

@ -5,8 +5,6 @@
package huff0 package huff0
import "fmt"
// bitWriter will write bits. // bitWriter will write bits.
// First bit will be LSB of the first byte of output. // First bit will be LSB of the first byte of output.
type bitWriter struct { type bitWriter struct {
@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{
0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
0xFFFF, 0xFFFF} /* up to 16 bits */ 0xFFFF, 0xFFFF} /* up to 16 bits */
// addBits16NC will add up to 16 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
b.nBits += bits
}
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently. // It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
@ -70,104 +60,6 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
b.nBits += encA.nBits + encB.nBits b.nBits += encA.nBits + encB.nBits
} }
// addBits16ZeroNC will add up to 16 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
// This is fastest if bits can be zero.
func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
if bits == 0 {
return
}
value <<= (16 - bits) & 15
value >>= (16 - bits) & 15
b.bitContainer |= uint64(value) << (b.nBits & 63)
b.nBits += bits
}
// flush will flush all pending full bytes.
// There will be at least 56 bits available for writing when this has been called.
// Using flush32 is faster, but leaves less space for writing.
func (b *bitWriter) flush() {
v := b.nBits >> 3
switch v {
case 0:
return
case 1:
b.out = append(b.out,
byte(b.bitContainer),
)
b.bitContainer >>= 1 << 3
case 2:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
)
b.bitContainer >>= 2 << 3
case 3:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
)
b.bitContainer >>= 3 << 3
case 4:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
)
b.bitContainer >>= 4 << 3
case 5:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
)
b.bitContainer >>= 5 << 3
case 6:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
)
b.bitContainer >>= 6 << 3
case 7:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
byte(b.bitContainer>>48),
)
b.bitContainer >>= 7 << 3
case 8:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
byte(b.bitContainer>>48),
byte(b.bitContainer>>56),
)
b.bitContainer = 0
b.nBits = 0
return
default:
panic(fmt.Errorf("bits (%d) > 64", b.nBits))
}
b.nBits &= 7
}
// flush32 will flush out, so there are at least 32 bits available for writing. // flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() { func (b *bitWriter) flush32() {
if b.nBits < 32 { if b.nBits < 32 {
@ -201,10 +93,3 @@ func (b *bitWriter) close() error {
b.flushAlign() b.flushAlign()
return nil return nil
} }
// reset and continue writing by appending to out.
func (b *bitWriter) reset(out []byte) {
b.bitContainer = 0
b.nBits = 0
b.out = out
}

View File

@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) {
b.off = 0 b.off = 0
} }
// advance the stream b n bytes.
func (b *byteReader) advance(n uint) {
b.off += int(n)
}
// Int32 returns a little endian int32 starting at current offset. // Int32 returns a little endian int32 starting at current offset.
func (b byteReader) Int32() int32 { func (b byteReader) Int32() int32 {
v3 := int32(b.b[b.off+3]) v3 := int32(b.b[b.off+3])
@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 {
return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
} }
// unread returns the unread portion of the input.
func (b byteReader) unread() []byte {
return b.b[b.off:]
}
// remain will return the number of bytes remaining. // remain will return the number of bytes remaining.
func (b byteReader) remain() int { func (b byteReader) remain() int {
return len(b.b) - b.off return len(b.b) - b.off

View File

@ -404,6 +404,7 @@ func (s *Scratch) canUseTable(c cTable) bool {
return true return true
} }
//lint:ignore U1000 used for debugging
func (s *Scratch) validateTable(c cTable) bool { func (s *Scratch) validateTable(c cTable) bool {
if len(c) < int(s.symbolLen) { if len(c) < int(s.symbolLen) {
return false return false
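The //lint:ignore line added above is staticcheck's standard suppression comment: the check identifier (U1000, unused) followed by a justification, placed immediately before the declaration it covers. For example:

```go
package main

//lint:ignore U1000 kept around for interactive debugging
func debugDump() {}

func main() {}
```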

View File

@ -11,7 +11,6 @@ import (
type dTable struct { type dTable struct {
single []dEntrySingle single []dEntrySingle
double []dEntryDouble
} }
// single-symbols decoding // single-symbols decoding
@ -19,13 +18,6 @@ type dEntrySingle struct {
entry uint16 entry uint16
} }
// double-symbols decoding
type dEntryDouble struct {
seq [4]byte
nBits uint8
len uint8
}
// Uses special code for all tables that are < 8 bits. // Uses special code for all tables that are < 8 bits.
const use8BitTables = true const use8BitTables = true
@ -35,7 +27,7 @@ const use8BitTables = true
// If no Scratch is provided a new one is allocated. // If no Scratch is provided a new one is allocated.
// The returned Scratch can be used for encoding or decoding input using this table. // The returned Scratch can be used for encoding or decoding input using this table.
func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
s, err = s.prepare(in) s, err = s.prepare(nil)
if err != nil { if err != nil {
return s, nil, err return s, nil, err
} }
@ -236,108 +228,6 @@ func (d *Decoder) buffer() *[4][256]byte {
return &[4][256]byte{} return &[4][256]byte{}
} }
// Decompress1X will decompress a 1X encoded stream.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
if use8BitTables && d.actualTableLog <= 8 {
return d.decompress1X8Bit(dst, src)
}
var br bitReaderShifted
err := br.init(src)
if err != nil {
return dst, err
}
maxDecodedSize := cap(dst)
dst = dst[:0]
// Avoid bounds check by always having full sized table.
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
dt := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
bufs := d.buffer()
buf := &bufs[0]
var off uint8
for br.off >= 8 {
br.fillFast()
v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+0] = uint8(v.entry >> 8)
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+1] = uint8(v.entry >> 8)
// Refill
br.fillFast()
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+2] = uint8(v.entry >> 8)
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+3] = uint8(v.entry >> 8)
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
br.close()
d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:]...)
}
}
if len(dst)+int(off) > maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:off]...)
// br < 8, so uint8 is fine
bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
for bitsLeft > 0 {
br.fill()
if false && br.bitsRead >= 32 {
if br.off >= 4 {
v := br.in[br.off-4:]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
br.value = (br.value << 32) | uint64(low)
br.bitsRead -= 32
br.off -= 4
} else {
for br.off > 0 {
br.value = (br.value << 8) | uint64(br.in[br.off-1])
br.bitsRead -= 8
br.off--
}
}
}
if len(dst) >= maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
nBits := uint8(v.entry)
br.advance(nBits)
bitsLeft -= nBits
dst = append(dst, uint8(v.entry>>8))
}
d.bufs.Put(bufs)
return dst, br.close()
}
// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. // decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8.
// The cap of the output buffer will be the maximum decompressed size. // The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly. // The length of the supplied input must match the end of a block exactly.
@ -995,7 +885,6 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
const shift = 56 const shift = 56
const tlSize = 1 << 8 const tlSize = 1 << 8
const tlMask = tlSize - 1
single := d.dt.single[:tlSize] single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty. // Use temp table to avoid bound checks/append penalty.

View File

@ -2,12 +2,14 @@
// +build amd64,!appengine,!noasm,gc // +build amd64,!appengine,!noasm,gc
// This file contains the specialisation of Decoder.Decompress4X // This file contains the specialisation of Decoder.Decompress4X
// that uses an asm implementation of its main loop. // and Decoder.Decompress1X that use an asm implementation of their main loops.
package huff0 package huff0
import ( import (
"errors" "errors"
"fmt" "fmt"
"github.com/klauspost/compress/internal/cpuinfo"
) )
// decompress4x_main_loop_x86 is an x86 assembler implementation // decompress4x_main_loop_x86 is an x86 assembler implementation
@ -146,3 +148,81 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
} }
return dst, nil return dst, nil
} }
// decompress1x_main_loop_amd64 is an x86 assembler implementation
// of Decompress1X when tablelog > 8.
//go:noescape
func decompress1x_main_loop_amd64(ctx *decompress1xContext)
// decompress1x_main_loop_bmi2 is an x86 with BMI2 assembler implementation
// of Decompress1X when tablelog > 8.
//go:noescape
func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
type decompress1xContext struct {
pbr *bitReaderShifted
peekBits uint8
out *byte
outCap int
tbl *dEntrySingle
decoded int
}
// Error reported by asm implementations
const error_max_decoded_size_exeeded = -1
// Decompress1X will decompress a 1X encoded stream.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
var br bitReaderShifted
err := br.init(src)
if err != nil {
return dst, err
}
maxDecodedSize := cap(dst)
dst = dst[:maxDecodedSize]
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
if maxDecodedSize >= 4 {
ctx := decompress1xContext{
pbr: &br,
out: &dst[0],
outCap: maxDecodedSize,
peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
tbl: &d.dt.single[0],
}
if cpuinfo.HasBMI2() {
decompress1x_main_loop_bmi2(&ctx)
} else {
decompress1x_main_loop_amd64(&ctx)
}
if ctx.decoded == error_max_decoded_size_exeeded {
return nil, ErrMaxDecodedSizeExceeded
}
dst = dst[:ctx.decoded]
}
// br < 8, so uint8 is fine
bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
for bitsLeft > 0 {
br.fill()
if len(dst) >= maxDecodedSize {
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
nBits := uint8(v.entry)
br.advance(nBits)
bitsLeft -= nBits
dst = append(dst, uint8(v.entry>>8))
}
return dst, br.close()
}
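The new wrapper picks between two assembler loops at run time based on CPU features, via the module's internal cpuinfo package. The same dispatch pattern, sketched with the public golang.org/x/sys/cpu package (internal/cpuinfo is not importable) and placeholder decode functions:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// Placeholder implementations; the real ones are assembler loops.
func decodeGeneric(src []byte) int { return len(src) }
func decodeBMI2(src []byte) int    { return len(src) }

func main() {
	decode := decodeGeneric
	// Select the fastest loop once, based on detected CPU features.
	if cpu.X86.HasBMI2 {
		decode = decodeBMI2
	}
	fmt.Println(decode([]byte("example")))
}
```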

View File

@ -660,3 +660,206 @@ skip_fill1003:
SHLQ $0x02, DX SHLQ $0x02, DX
MOVQ DX, 64(AX) MOVQ DX, 64(AX)
RET RET
// func decompress1x_main_loop_amd64(ctx *decompress1xContext)
TEXT ·decompress1x_main_loop_amd64(SB), $0-8
MOVQ ctx+0(FP), CX
MOVQ 16(CX), DX
MOVQ 24(CX), BX
CMPQ BX, $0x04
JB error_max_decoded_size_exeeded
LEAQ (DX)(BX*1), BX
MOVQ (CX), SI
MOVQ (SI), R8
MOVQ 24(SI), R9
MOVQ 32(SI), R10
MOVBQZX 40(SI), R11
MOVQ 32(CX), SI
MOVBQZX 8(CX), DI
JMP loop_condition
main_loop:
// Check if we have room for 4 bytes in the output buffer
LEAQ 4(DX), CX
CMPQ CX, BX
JGE error_max_decoded_size_exeeded
// Decode 4 values
CMPQ R11, $0x20
JL bitReader_fillFast_1_end
SUBQ $0x20, R11
SUBQ $0x04, R9
MOVL (R8)(R9*1), R12
MOVQ R11, CX
SHLQ CL, R12
ORQ R12, R10
bitReader_fillFast_1_end:
MOVQ DI, CX
MOVQ R10, R12
SHRQ CL, R12
MOVW (SI)(R12*2), CX
MOVB CH, AL
MOVBQZX CL, CX
ADDQ CX, R11
SHLQ CL, R10
MOVQ DI, CX
MOVQ R10, R12
SHRQ CL, R12
MOVW (SI)(R12*2), CX
MOVB CH, AH
MOVBQZX CL, CX
ADDQ CX, R11
SHLQ CL, R10
BSWAPL AX
CMPQ R11, $0x20
JL bitReader_fillFast_2_end
SUBQ $0x20, R11
SUBQ $0x04, R9
MOVL (R8)(R9*1), R12
MOVQ R11, CX
SHLQ CL, R12
ORQ R12, R10
bitReader_fillFast_2_end:
MOVQ DI, CX
MOVQ R10, R12
SHRQ CL, R12
MOVW (SI)(R12*2), CX
MOVB CH, AH
MOVBQZX CL, CX
ADDQ CX, R11
SHLQ CL, R10
MOVQ DI, CX
MOVQ R10, R12
SHRQ CL, R12
MOVW (SI)(R12*2), CX
MOVB CH, AL
MOVBQZX CL, CX
ADDQ CX, R11
SHLQ CL, R10
BSWAPL AX
// Store the decoded values
MOVL AX, (DX)
ADDQ $0x04, DX
loop_condition:
CMPQ R9, $0x08
JGE main_loop
// Update ctx structure
MOVQ ctx+0(FP), AX
MOVQ DX, CX
MOVQ 16(AX), DX
SUBQ DX, CX
MOVQ CX, 40(AX)
MOVQ (AX), AX
MOVQ R9, 24(AX)
MOVQ R10, 32(AX)
MOVB R11, 40(AX)
RET
// Report error
error_max_decoded_size_exeeded:
MOVQ ctx+0(FP), AX
MOVQ $-1, CX
MOVQ CX, 40(AX)
RET
// func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
// Requires: BMI2
TEXT ·decompress1x_main_loop_bmi2(SB), $0-8
MOVQ ctx+0(FP), CX
MOVQ 16(CX), DX
MOVQ 24(CX), BX
CMPQ BX, $0x04
JB error_max_decoded_size_exeeded
LEAQ (DX)(BX*1), BX
MOVQ (CX), SI
MOVQ (SI), R8
MOVQ 24(SI), R9
MOVQ 32(SI), R10
MOVBQZX 40(SI), R11
MOVQ 32(CX), SI
MOVBQZX 8(CX), DI
JMP loop_condition
main_loop:
// Check if we have room for 4 bytes in the output buffer
LEAQ 4(DX), CX
CMPQ CX, BX
JGE error_max_decoded_size_exeeded
// Decode 4 values
CMPQ R11, $0x20
JL bitReader_fillFast_1_end
SUBQ $0x20, R11
SUBQ $0x04, R9
MOVL (R8)(R9*1), CX
SHLXQ R11, CX, CX
ORQ CX, R10
bitReader_fillFast_1_end:
SHRXQ DI, R10, CX
MOVW (SI)(CX*2), CX
MOVB CH, AL
MOVBQZX CL, CX
ADDQ CX, R11
SHLXQ CX, R10, R10
SHRXQ DI, R10, CX
MOVW (SI)(CX*2), CX
MOVB CH, AH
MOVBQZX CL, CX
ADDQ CX, R11
SHLXQ CX, R10, R10
BSWAPL AX
CMPQ R11, $0x20
JL bitReader_fillFast_2_end
SUBQ $0x20, R11
SUBQ $0x04, R9
MOVL (R8)(R9*1), CX
SHLXQ R11, CX, CX
ORQ CX, R10
bitReader_fillFast_2_end:
SHRXQ DI, R10, CX
MOVW (SI)(CX*2), CX
MOVB CH, AH
MOVBQZX CL, CX
ADDQ CX, R11
SHLXQ CX, R10, R10
SHRXQ DI, R10, CX
MOVW (SI)(CX*2), CX
MOVB CH, AL
MOVBQZX CL, CX
ADDQ CX, R11
SHLXQ CX, R10, R10
BSWAPL AX
// Store the decoded values
MOVL AX, (DX)
ADDQ $0x04, DX
loop_condition:
CMPQ R9, $0x08
JGE main_loop
// Update ctx structure
MOVQ ctx+0(FP), AX
MOVQ DX, CX
MOVQ 16(AX), DX
SUBQ DX, CX
MOVQ CX, 40(AX)
MOVQ (AX), AX
MOVQ R9, 24(AX)
MOVQ R10, 32(AX)
MOVB R11, 40(AX)
RET
// Report error
error_max_decoded_size_exeeded:
MOVQ ctx+0(FP), AX
MOVQ $-1, CX
MOVQ CX, 40(AX)
RET

View File

@ -191,3 +191,105 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
} }
return dst, nil return dst, nil
} }
// Decompress1X will decompress a 1X encoded stream.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
if use8BitTables && d.actualTableLog <= 8 {
return d.decompress1X8Bit(dst, src)
}
var br bitReaderShifted
err := br.init(src)
if err != nil {
return dst, err
}
maxDecodedSize := cap(dst)
dst = dst[:0]
// Avoid bounds check by always having full sized table.
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
dt := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
bufs := d.buffer()
buf := &bufs[0]
var off uint8
for br.off >= 8 {
br.fillFast()
v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+0] = uint8(v.entry >> 8)
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+1] = uint8(v.entry >> 8)
// Refill
br.fillFast()
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+2] = uint8(v.entry >> 8)
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+3] = uint8(v.entry >> 8)
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
br.close()
d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:]...)
}
}
if len(dst)+int(off) > maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:off]...)
// br < 8, so uint8 is fine
bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
for bitsLeft > 0 {
br.fill()
if false && br.bitsRead >= 32 {
if br.off >= 4 {
v := br.in[br.off-4:]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
br.value = (br.value << 32) | uint64(low)
br.bitsRead -= 32
br.off -= 4
} else {
for br.off > 0 {
br.value = (br.value << 8) | uint64(br.in[br.off-1])
br.bitsRead -= 8
br.off--
}
}
}
if len(dst) >= maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
nBits := uint8(v.entry)
br.advance(nBits)
bitsLeft -= nBits
dst = append(dst, uint8(v.entry>>8))
}
d.bufs.Put(bufs)
return dst, br.close()
}

View File

@ -63,13 +63,6 @@ func (b *bitReader) get32BitsFast(n uint8) uint32 {
return v return v
} }
func (b *bitReader) get16BitsFast(n uint8) uint16 {
const regMask = 64 - 1
v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
b.bitsRead += n
return v
}
// fillFast() will make sure at least 32 bits are available. // fillFast() will make sure at least 32 bits are available.
// There must be at least 4 bytes available. // There must be at least 4 bytes available.
func (b *bitReader) fillFast() { func (b *bitReader) fillFast() {

View File

@ -5,8 +5,6 @@
package zstd package zstd
import "fmt"
// bitWriter will write bits. // bitWriter will write bits.
// First bit will be LSB of the first byte of output. // First bit will be LSB of the first byte of output.
type bitWriter struct { type bitWriter struct {
@ -73,80 +71,6 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
b.nBits += bits b.nBits += bits
} }
// flush will flush all pending full bytes.
// There will be at least 56 bits available for writing when this has been called.
// Using flush32 is faster, but leaves less space for writing.
func (b *bitWriter) flush() {
v := b.nBits >> 3
switch v {
case 0:
case 1:
b.out = append(b.out,
byte(b.bitContainer),
)
case 2:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
)
case 3:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
)
case 4:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
)
case 5:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
)
case 6:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
)
case 7:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
byte(b.bitContainer>>48),
)
case 8:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
byte(b.bitContainer>>48),
byte(b.bitContainer>>56),
)
default:
panic(fmt.Errorf("bits (%d) > 64", b.nBits))
}
b.bitContainer >>= v << 3
b.nBits &= 7
}
// flush32 will flush out, so there are at least 32 bits available for writing. // flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() { func (b *bitWriter) flush32() {
if b.nBits < 32 { if b.nBits < 32 {

View File

@ -49,9 +49,6 @@ const (
// Maximum possible block size (all Raw+Uncompressed). // Maximum possible block size (all Raw+Uncompressed).
maxBlockSize = (1 << 21) - 1 maxBlockSize = (1 << 21) - 1
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
maxCompressedLiteralSize = 1 << 18
maxRLELiteralSize = 1 << 20
maxMatchLen = 131074 maxMatchLen = 131074
maxSequences = 0x7f00 + 0xffff maxSequences = 0x7f00 + 0xffff
@ -105,7 +102,6 @@ type blockDec struct {
// Block is RLE, this is the size. // Block is RLE, this is the size.
RLESize uint32 RLESize uint32
tmp [4]byte
Type blockType Type blockType
@ -368,14 +364,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
} }
if cap(b.literalBuf) < litRegenSize { if cap(b.literalBuf) < litRegenSize {
if b.lowMem { if b.lowMem {
b.literalBuf = make([]byte, litRegenSize) b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc)
} else { } else {
if litRegenSize > maxCompressedLiteralSize { b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc)
// Exceptional
b.literalBuf = make([]byte, litRegenSize)
} else {
b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
}
} }
} }
literals = b.literalBuf[:litRegenSize] literals = b.literalBuf[:litRegenSize]
@ -405,14 +396,14 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
// Ensure we have space to store it. // Ensure we have space to store it.
if cap(b.literalBuf) < litRegenSize { if cap(b.literalBuf) < litRegenSize {
if b.lowMem { if b.lowMem {
b.literalBuf = make([]byte, 0, litRegenSize) b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
} else { } else {
b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
} }
} }
var err error var err error
// Use our out buffer. // Use our out buffer.
huff.MaxDecodedSize = maxCompressedBlockSize huff.MaxDecodedSize = litRegenSize
if fourStreams { if fourStreams {
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
} else { } else {
@ -437,9 +428,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
// Ensure we have space to store it. // Ensure we have space to store it.
if cap(b.literalBuf) < litRegenSize { if cap(b.literalBuf) < litRegenSize {
if b.lowMem { if b.lowMem {
b.literalBuf = make([]byte, 0, litRegenSize) b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
} else { } else {
b.literalBuf = make([]byte, 0, maxCompressedBlockSize) b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
} }
} }
huff := hist.huffTree huff := hist.huffTree
@ -456,7 +447,7 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
return in, err return in, err
} }
hist.huffTree = huff hist.huffTree = huff
huff.MaxDecodedSize = maxCompressedBlockSize huff.MaxDecodedSize = litRegenSize
// Use our out buffer. // Use our out buffer.
if fourStreams { if fourStreams {
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
@ -471,6 +462,8 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
if len(literals) != litRegenSize { if len(literals) != litRegenSize {
return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
} }
// Re-cap to get extra size.
literals = b.literalBuf[:len(literals)]
if debugDecoder { if debugDecoder {
printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
} }
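The literal-buffer hunks above switch every allocation to its logical size plus compressedBlockOverAlloc of spare capacity, so the assembler decoders can safely touch a few bytes past the logical end, and the final re-slice ("Re-cap to get extra size") restores that capacity for later stages. The allocation pattern in isolation; the padding constant here is illustrative, not the real value:

```go
package main

import "fmt"

// Illustrative padding; the real compressedBlockOverAlloc value may differ.
const overAlloc = 16

func main() {
	n := 100
	// Allocate with spare capacity so a fast decoder may safely touch a
	// few bytes past the logical end of the buffer.
	buf := make([]byte, n, n+overAlloc)
	literals := buf[:n]
	// "Re-cap" after decoding: re-slice from the backing buffer so later
	// stages see the padded capacity again.
	literals = buf[:len(literals)]
	fmt.Println(len(literals), cap(literals)) // 100 116
}
```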

View File

@ -52,10 +52,6 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
return r, nil return r, nil
} }
func (b *byteBuf) remain() []byte {
return *b
}
func (b *byteBuf) readByte() (byte, error) { func (b *byteBuf) readByte() (byte, error) {
bb := *b bb := *b
if len(bb) < 1 { if len(bb) < 1 {

View File

@ -13,12 +13,6 @@ type byteReader struct {
off int off int
} }
// init will initialize the reader and set the input.
func (b *byteReader) init(in []byte) {
b.b = in
b.off = 0
}
// advance the stream b n bytes. // advance the stream b n bytes.
func (b *byteReader) advance(n uint) { func (b *byteReader) advance(n uint) {
b.off += int(n) b.off += int(n)

View File

@ -637,60 +637,18 @@ func (d *Decoder) startSyncDecoder(r io.Reader) error {
// Create Decoder: // Create Decoder:
// ASYNC: // ASYNC:
// Spawn 4 goroutines. // Spawn 3 goroutines.
// 0: Read frames and decode blocks. // 0: Read frames and decode block literals.
// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree. // 1: Decode sequences.
// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets. // 2: Execute sequences, send to output.
// 3: Wait for stream history, execute sequences, send stream history.
func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
defer d.streamWg.Done() defer d.streamWg.Done()
br := readerWrapper{r: r} br := readerWrapper{r: r}
var seqPrepare = make(chan *blockDec, d.o.concurrent)
var seqDecode = make(chan *blockDec, d.o.concurrent) var seqDecode = make(chan *blockDec, d.o.concurrent)
var seqExecute = make(chan *blockDec, d.o.concurrent) var seqExecute = make(chan *blockDec, d.o.concurrent)
// Async 1: Prepare blocks... // Async 1: Decode sequences...
go func() {
var hist history
var hasErr bool
for block := range seqPrepare {
if hasErr {
if block != nil {
seqDecode <- block
}
continue
}
if block.async.newHist != nil {
if debugDecoder {
println("Async 1: new history")
}
hist.reset()
if block.async.newHist.dict != nil {
hist.setDict(block.async.newHist.dict)
}
}
if block.err != nil || block.Type != blockTypeCompressed {
hasErr = block.err != nil
seqDecode <- block
continue
}
remain, err := block.decodeLiterals(block.data, &hist)
block.err = err
hasErr = block.err != nil
if err == nil {
block.async.literals = hist.decoders.literals
block.async.seqData = remain
} else if debugDecoder {
println("decodeLiterals error:", err)
}
seqDecode <- block
}
close(seqDecode)
}()
// Async 2: Decode sequences...
go func() { go func() {
var hist history var hist history
var hasErr bool var hasErr bool
@ -704,7 +662,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
} }
if block.async.newHist != nil { if block.async.newHist != nil {
if debugDecoder { if debugDecoder {
println("Async 2: new history, recent:", block.async.newHist.recentOffsets) println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
} }
hist.decoders = block.async.newHist.decoders hist.decoders = block.async.newHist.decoders
hist.recentOffsets = block.async.newHist.recentOffsets hist.recentOffsets = block.async.newHist.recentOffsets
@ -758,7 +716,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
} }
if block.async.newHist != nil { if block.async.newHist != nil {
if debugDecoder { if debugDecoder {
println("Async 3: new history") println("Async 2: new history")
} }
hist.windowSize = block.async.newHist.windowSize hist.windowSize = block.async.newHist.windowSize
hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
@ -845,6 +803,33 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
decodeStream: decodeStream:
for { for {
var hist history
var hasErr bool
decodeBlock := func(block *blockDec) {
if hasErr {
if block != nil {
seqDecode <- block
}
return
}
if block.err != nil || block.Type != blockTypeCompressed {
hasErr = block.err != nil
seqDecode <- block
return
}
remain, err := block.decodeLiterals(block.data, &hist)
block.err = err
hasErr = block.err != nil
if err == nil {
block.async.literals = hist.decoders.literals
block.async.seqData = remain
} else if debugDecoder {
println("decodeLiterals error:", err)
}
seqDecode <- block
}
frame := d.frame frame := d.frame
if debugDecoder { if debugDecoder {
println("New frame...") println("New frame...")
@ -871,7 +856,7 @@ decodeStream:
case <-ctx.Done(): case <-ctx.Done():
case dec := <-d.decoders: case dec := <-d.decoders:
dec.sendErr(err) dec.sendErr(err)
seqPrepare <- dec decodeBlock(dec)
} }
break decodeStream break decodeStream
} }
@ -891,6 +876,10 @@ decodeStream:
if debugDecoder { if debugDecoder {
println("Alloc History:", h.allocFrameBuffer) println("Alloc History:", h.allocFrameBuffer)
} }
hist.reset()
if h.dict != nil {
hist.setDict(h.dict)
}
dec.async.newHist = &h dec.async.newHist = &h
dec.async.fcs = frame.FrameContentSize dec.async.fcs = frame.FrameContentSize
historySent = true historySent = true
@ -917,7 +906,7 @@ decodeStream:
} }
err = dec.err err = dec.err
last := dec.Last last := dec.Last
seqPrepare <- dec decodeBlock(dec)
if err != nil { if err != nil {
break decodeStream break decodeStream
} }
@ -926,7 +915,7 @@ decodeStream:
} }
} }
} }
close(seqPrepare) close(seqDecode)
wg.Wait() wg.Wait()
d.frame.history.b = frameHistCache d.frame.history.b = frameHistCache
} }
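Taken together, the hunks above collapse the decoder's three async stages into two: literal decoding now happens inline in the frame loop (the new decodeBlock closure) before blocks enter the sequence pipeline. A minimal sketch of the resulting shape, with a toy block type standing in for blockDec:

package main

import "fmt"

type block struct {
	id  int
	err error
}

// startPipeline mirrors the new two-stage layout: the caller decodes
// literals inline (decodeBlock in the real code), stage 1 decodes
// sequences, stage 2 executes them and emits output.
func startPipeline(output chan<- block) chan<- block {
	seqDecode := make(chan block, 4)
	seqExecute := make(chan block, 4)
	go func() { // Async 1: decode sequences
		for b := range seqDecode {
			seqExecute <- b
		}
		close(seqExecute)
	}()
	go func() { // Async 2: execute sequences, send to output
		for b := range seqExecute {
			output <- b
		}
		close(output)
	}()
	return seqDecode
}

func main() {
	out := make(chan block)
	in := startPipeline(out)
	go func() {
		for i := 0; i < 3; i++ {
			in <- block{id: i} // literals already decoded by the caller
		}
		close(in)
	}()
	for b := range out {
		fmt.Println("block", b.id)
	}
}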

View File

@ -156,8 +156,8 @@ encodeLoop:
panic("offset0 was 0") panic("offset0 was 0")
} }
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
candidateL := e.longTable[nextHashL] candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS] candidateS := e.table[nextHashS]
@ -518,8 +518,8 @@ encodeLoop:
} }
// Store this, since we have it. // Store this, since we have it.
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
// We have at least 4 byte match. // We have at least 4 byte match.
// No need to check backwards. We come straight from a match // No need to check backwards. We come straight from a match
@ -674,8 +674,8 @@ encodeLoop:
panic("offset0 was 0") panic("offset0 was 0")
} }
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
candidateL := e.longTable[nextHashL] candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS] candidateS := e.table[nextHashS]
@ -1047,8 +1047,8 @@ encodeLoop:
} }
// Store this, since we have it. // Store this, since we have it.
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
// We have at least 4 byte match. // We have at least 4 byte match.
// No need to check backwards. We come straight from a match // No need to check backwards. We come straight from a match

View File

@ -127,8 +127,8 @@ encodeLoop:
panic("offset0 was 0") panic("offset0 was 0")
} }
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL] candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS] candidateS := e.table[nextHashS]
@ -439,8 +439,8 @@ encodeLoop:
var t int32 var t int32
for { for {
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL] candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS] candidateS := e.table[nextHashS]
@ -785,8 +785,8 @@ encodeLoop:
panic("offset0 was 0") panic("offset0 was 0")
} }
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL] candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS] candidateS := e.table[nextHashS]
@ -969,7 +969,7 @@ encodeLoop:
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen) longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen)
e.longTable[longHash1] = te0 e.longTable[longHash1] = te0
e.longTable[longHash2] = te1 e.longTable[longHash2] = te1
e.markLongShardDirty(longHash1) e.markLongShardDirty(longHash1)
@ -1002,8 +1002,8 @@ encodeLoop:
} }
// Store this, since we have it. // Store this, since we have it.
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
// We have at least 4 byte match. // We have at least 4 byte match.
// No need to check backwards. We come straight from a match // No need to check backwards. We come straight from a match
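One hunk above is a genuine bug fix rather than a reorder: longHash2 must hash cv1, the next 8-byte window, or te1 gets stored under te0's bucket. A standalone illustration, assuming the multiply-shift form and prime constant from the package's hash.go:

package main

import (
	"encoding/binary"
	"fmt"
)

const prime8bytes = 0xcf1bbcdcb7a56463 // assumed from the package's hash.go

// hash8 is an illustrative multiply-shift hash over 8 bytes, the shape
// of the package's hashLen for long-table lookups.
func hash8(u uint64, bits uint8) uint32 {
	return uint32((u * prime8bytes) >> (64 - bits))
}

func main() {
	src := []byte("zstd double-fast window example")
	cv0 := binary.LittleEndian.Uint64(src[0:])
	cv1 := binary.LittleEndian.Uint64(src[1:]) // the *next* window
	// Correct behavior after the fix: te0 is indexed by hash(cv0) and
	// te1 by hash(cv1); hashing cv0 twice stored te1 in the wrong slot.
	fmt.Println(hash8(cv0, 17), hash8(cv1, 17))
}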

View File

@ -551,7 +551,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
} }
// If we can do everything in one block, prefer that. // If we can do everything in one block, prefer that.
if len(src) <= maxCompressedBlockSize { if len(src) <= e.o.blockSize {
enc.Reset(e.o.dict, true) enc.Reset(e.o.dict, true)
// Slightly faster with no history and everything in one block. // Slightly faster with no history and everything in one block.
if e.o.crc { if e.o.crc {
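The gate above now compares against the configured block size rather than the format-wide maximum, so a smaller configured block no longer gets encoded as one oversized block. A toy restatement with illustrative values:

package main

import "fmt"

// singleBlock mirrors the corrected gate: one-shot encoding is chosen
// against the configured block size, not the format-wide maximum.
func singleBlock(srcLen, blockSize int) bool { return srcLen <= blockSize }

func main() {
	const blockSize = 64 << 10 // e.g. a smaller configured block size
	fmt.Println(singleBlock(16<<10, blockSize)) // true: one-block fast path
	fmt.Println(singleBlock(1<<20, blockSize))  // false: multi-block path
}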

View File

@ -253,10 +253,11 @@ func (d *frameDec) reset(br byteBuffer) error {
return ErrWindowSizeTooSmall return ErrWindowSizeTooSmall
} }
d.history.windowSize = int(d.WindowSize) d.history.windowSize = int(d.WindowSize)
if d.o.lowMem && d.history.windowSize < maxBlockSize { if !d.o.lowMem || d.history.windowSize < maxBlockSize {
// Alloc 2x window size if not low-mem, or very small window size.
d.history.allocFrameBuffer = d.history.windowSize * 2 d.history.allocFrameBuffer = d.history.windowSize * 2
// TODO: Maybe use FrameContent size
} else { } else {
// Alloc with one additional block
d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
} }
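The flipped condition changes who gets the generous buffer: any non-lowMem decoder (or a very small window) allocates twice the window, while lowMem with a large window pays only window plus one block. A hedged restatement, assuming the package's 128KiB maxBlockSize:

package main

import "fmt"

// allocSize restates the corrected policy from frameDec.reset above;
// maxBlockSize is 128KiB in the real package (treated as a given here).
func allocSize(windowSize int, lowMem bool) int {
	const maxBlockSize = 128 << 10
	if !lowMem || windowSize < maxBlockSize {
		// Alloc 2x window size if not low-mem, or very small window size.
		return windowSize * 2
	}
	// Low-mem with a large window: window plus one additional block.
	return windowSize + maxBlockSize
}

func main() {
	fmt.Println(allocSize(8<<20, false)) // 16 MiB
	fmt.Println(allocSize(8<<20, true))  // 8 MiB + 128 KiB
}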

View File

@ -229,18 +229,10 @@ func (d decSymbol) newState() uint16 {
return uint16(d >> 16) return uint16(d >> 16)
} }
func (d decSymbol) baseline() uint32 {
return uint32(d >> 32)
}
func (d decSymbol) baselineInt() int { func (d decSymbol) baselineInt() int {
return int(d >> 32) return int(d >> 32)
} }
func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) {
*d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
}
func (d *decSymbol) setNBits(nBits uint8) { func (d *decSymbol) setNBits(nBits uint8) {
const mask = 0xffffffffffffff00 const mask = 0xffffffffffffff00
*d = (*d & mask) | decSymbol(nBits) *d = (*d & mask) | decSymbol(nBits)
@ -256,11 +248,6 @@ func (d *decSymbol) setNewState(state uint16) {
*d = (*d & mask) | decSymbol(state)<<16 *d = (*d & mask) | decSymbol(state)<<16
} }
func (d *decSymbol) setBaseline(baseline uint32) {
const mask = 0xffffffff
*d = (*d & mask) | decSymbol(baseline)<<32
}
func (d *decSymbol) setExt(addBits uint8, baseline uint32) { func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
const mask = 0xffff00ff const mask = 0xffff00ff
*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
@ -377,34 +364,7 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
s.state = dt[br.getBits(tableLog)] s.state = dt[br.getBits(tableLog)]
} }
// next returns the current symbol and sets the next state.
// At least tablelog bits must be available in the bit reader.
func (s *fseState) next(br *bitReader) {
lowBits := uint16(br.getBits(s.state.nbBits()))
s.state = s.dt[s.state.newState()+lowBits]
}
// finished returns true if all bits have been read from the bitstream
// and the next state would require reading bits from the input.
func (s *fseState) finished(br *bitReader) bool {
return br.finished() && s.state.nbBits() > 0
}
// final returns the current state symbol without decoding the next.
func (s *fseState) final() (int, uint8) {
return s.state.baselineInt(), s.state.addBits()
}
// final returns the current state symbol without decoding the next. // final returns the current state symbol without decoding the next.
func (s decSymbol) final() (int, uint8) { func (s decSymbol) final() (int, uint8) {
return s.baselineInt(), s.addBits() return s.baselineInt(), s.addBits()
} }
// nextFast returns the next symbol and sets the next state.
// This can only be used if no symbols are 0 bits.
// At least tablelog bits must be available in the bit reader.
func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
lowBits := br.get16BitsFast(s.state.nbBits())
s.state = s.dt[s.state.newState()+lowBits]
return s.state.baseline(), s.state.addBits()
}
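All of the deleted helpers above manipulated one packed 64-bit decSymbol layout: nbBits in byte 0, addBits in byte 1, newState in bytes 2-3, baseline in bytes 4-7, which the surviving setNBits/setExt/final still assume. A self-contained sketch of that packing:

package main

import "fmt"

type decSymbol uint64

func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
	return decSymbol(nbits) | decSymbol(addBits)<<8 |
		decSymbol(newState)<<16 | decSymbol(baseline)<<32
}

func (d decSymbol) nbBits() uint8    { return uint8(d) }
func (d decSymbol) addBits() uint8   { return uint8(d >> 8) }
func (d decSymbol) newState() uint16 { return uint16(d >> 16) }
func (d decSymbol) baseline() uint32 { return uint32(d >> 32) }

func main() {
	d := newDecSymbol(5, 3, 0x1234, 0xdeadbeef)
	fmt.Println(d.nbBits(), d.addBits(), d.newState(), d.baseline())
}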

View File

@ -76,21 +76,6 @@ func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
s.clearCount = maxCount != 0 s.clearCount = maxCount != 0
} }
// prepare will prepare and allocate scratch tables used for both compression and decompression.
func (s *fseEncoder) prepare() (*fseEncoder, error) {
if s == nil {
s = &fseEncoder{}
}
s.useRLE = false
if s.clearCount && s.maxCount == 0 {
for i := range s.count {
s.count[i] = 0
}
s.clearCount = false
}
return s, nil
}
// allocCtable will allocate tables needed for compression. // allocCtable will allocate tables needed for compression.
// If existing tables are big enough, they are simply re-used. // If existing tables are big enough, they are simply re-used.
func (s *fseEncoder) allocCtable() { func (s *fseEncoder) allocCtable() {
@ -709,14 +694,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
c.state = c.stateTable[lu] c.state = c.stateTable[lu]
} }
// encode the output symbol provided and write it to the bitstream.
func (c *cState) encode(symbolTT symbolTransform) {
nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState)
c.bw.addBits16NC(c.state, uint8(nbBitsOut))
c.state = c.stateTable[dstState]
}
// flush will write the tablelog to the output and flush the remaining full bytes. // flush will write the tablelog to the output and flush the remaining full bytes.
func (c *cState) flush(tableLog uint8) { func (c *cState) flush(tableLog uint8) {
c.bw.flush32() c.bw.flush32()

View File

@ -33,9 +33,3 @@ func hashLen(u uint64, length, mls uint8) uint32 {
return (uint32(u) * prime4bytes) >> (32 - length) return (uint32(u) * prime4bytes) >> (32 - length)
} }
} }
// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash3(u uint32, h uint8) uint32 {
return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
}
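The removed hash3 followed the same multiply-shift recipe as the surviving hashLen: shift the useful bytes to the top, multiply by a prime, keep the high bits. A compact restatement of both forms (prime values as I understand them from the package; treat them as assumptions):

package main

import "fmt"

const (
	prime3bytes = 506832829
	prime4bytes = 2654435761
)

// hash3 hashes the low 3 bytes of u into h bits (h < 32), exactly the
// shape of the helper removed above.
func hash3(u uint32, h uint8) uint32 {
	return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
}

// hash4 hashes the low 4 bytes of u into `length` bits, matching the
// mls==4 case of the package's hashLen.
func hash4(u uint64, length uint8) uint32 {
	return (uint32(u) * prime4bytes) >> (32 - length)
}

func main() {
	fmt.Println(hash3(0x00c0ffee, 16), hash4(0xdeadbeef, 17))
}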

View File

@ -188,6 +188,7 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
} }
} }
} }
// Add final literals // Add final literals
copy(out[t:], s.literals) copy(out[t:], s.literals)
if debugDecoder { if debugDecoder {
@ -203,12 +204,11 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
// decode sequences from the stream with the provided history. // decode sequences from the stream with the provided history.
func (s *sequenceDecs) decodeSync(hist []byte) error { func (s *sequenceDecs) decodeSync(hist []byte) error {
if true {
supported, err := s.decodeSyncSimple(hist) supported, err := s.decodeSyncSimple(hist)
if supported { if supported {
return err return err
} }
}
br := s.br br := s.br
seqs := s.nSeqs seqs := s.nSeqs
startSize := len(s.out) startSize := len(s.out)
@ -396,6 +396,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
ofState = ofTable[ofState.newState()&maxTableMask] ofState = ofTable[ofState.newState()&maxTableMask]
} else { } else {
bits := br.get32BitsFast(nBits) bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask] llState = llTable[(llState.newState()+lowBits)&maxTableMask]
@ -418,16 +419,6 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
return br.close() return br.close()
} }
// update states, at least 27 bits must be available.
func (s *sequenceDecs) update(br *bitReader) {
// Max 8 bits
s.litLengths.state.next(br)
// Max 9 bits
s.matchLengths.state.next(br)
// Max 8 bits
s.offsets.state.next(br)
}
var bitMask [16]uint16 var bitMask [16]uint16
func init() { func init() {
@ -436,87 +427,6 @@ func init() {
} }
} }
// update states, at least 27 bits must be available.
func (s *sequenceDecs) updateAlt(br *bitReader) {
// Update all 3 states at once. Approx 20% faster.
a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
nBits := a.nbBits() + b.nbBits() + c.nbBits()
if nBits == 0 {
s.litLengths.state.state = s.litLengths.state.dt[a.newState()]
s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()]
s.offsets.state.state = s.offsets.state.dt[c.newState()]
return
}
bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
lowBits = uint16(bits >> (c.nbBits() & 31))
lowBits &= bitMask[b.nbBits()&15]
s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits]
lowBits = uint16(bits) & bitMask[c.nbBits()&15]
s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits]
}
// nextFast will return new states when there are at least 4 unused bytes left on the stream when done.
func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
// Final will not read from stream.
ll, llB := llState.final()
ml, mlB := mlState.final()
mo, moB := ofState.final()
// extra bits are stored in reverse order.
br.fillFast()
mo += br.getBits(moB)
if s.maxBits > 32 {
br.fillFast()
}
ml += br.getBits(mlB)
ll += br.getBits(llB)
if moB > 1 {
s.prevOffset[2] = s.prevOffset[1]
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = mo
return
}
// mo = s.adjustOffset(mo, ll, moB)
// Inlined for rather big speedup
if ll == 0 {
// There is an exception though, when current sequence's literals_length = 0.
// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
mo++
}
if mo == 0 {
mo = s.prevOffset[0]
return
}
var temp int
if mo == 3 {
temp = s.prevOffset[0] - 1
} else {
temp = s.prevOffset[mo]
}
if temp == 0 {
// 0 is not valid; input is corrupted; force offset to 1
println("temp was 0")
temp = 1
}
if mo != 1 {
s.prevOffset[2] = s.prevOffset[1]
}
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = temp
mo = temp
return
}
func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
// Final will not read from stream. // Final will not read from stream.
ll, llB := llState.final() ll, llB := llState.final()
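The deleted nextFast inlined zstd's repeat-offset rules; the same logic lives on in next and in the assembly paths. A self-contained sketch of just the offset-history update, following the comments in the removed code:

package main

import "fmt"

// adjustOffset restates the repeat-offset rules inlined in the deleted
// nextFast: moBits > 1 means a real offset; otherwise offsets select
// from the three-entry history, shifted by one when ll == 0.
func adjustOffset(prev *[3]int, mo, ll int, moBits uint8) int {
	if moBits > 1 {
		prev[2], prev[1], prev[0] = prev[1], prev[0], mo
		return mo
	}
	if ll == 0 {
		// literals_length == 0 shifts the repeat offsets by one:
		// 1 -> Repeated_Offset2, 2 -> Repeated_Offset3, 3 -> Offset1-1.
		mo++
	}
	if mo == 0 {
		return prev[0]
	}
	var temp int
	if mo == 3 {
		temp = prev[0] - 1
	} else {
		temp = prev[mo]
	}
	if temp == 0 {
		temp = 1 // 0 is invalid; corrupted input forces offset 1
	}
	if mo != 1 {
		prev[2] = prev[1]
	}
	prev[1] = prev[0]
	prev[0] = temp
	return temp
}

func main() {
	hist := [3]int{1, 4, 8}
	fmt.Println(adjustOffset(&hist, 2, 5, 1), hist) // selects old offset 2
}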

View File

@ -62,6 +62,10 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
useSafe = true useSafe = true
} }
if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
useSafe = true
}
br := s.br br := s.br
maxBlockSize := maxCompressedBlockSize maxBlockSize := maxCompressedBlockSize
@ -301,6 +305,10 @@ type executeAsmContext struct {
//go:noescape //go:noescape
func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
// Same as above, but with safe memcopies
//go:noescape
func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
// executeSimple handles cases when dictionary is not used. // executeSimple handles cases when dictionary is not used.
func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
// Ensure we have enough output size... // Ensure we have enough output size...
@ -327,8 +335,12 @@ func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
literals: s.literals, literals: s.literals,
windowSize: s.windowSize, windowSize: s.windowSize,
} }
var ok bool
ok := sequenceDecs_executeSimple_amd64(&ctx) if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
ok = sequenceDecs_executeSimple_safe_amd64(&ctx)
} else {
ok = sequenceDecs_executeSimple_amd64(&ctx)
}
if !ok { if !ok {
return fmt.Errorf("match offset (%d) bigger than current history (%d)", return fmt.Errorf("match offset (%d) bigger than current history (%d)",
seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist))
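The new branch above selects the byte-exact "safe" routine whenever the literals slice lacks compressedBlockOverAlloc bytes of spare capacity, because the fast path copies in 16-byte chunks and can touch memory past the logical end. A plain-Go sketch of the distinction (names and the 16-byte constant are illustrative):

package main

import "fmt"

const overAlloc = 16 // stand-in for compressedBlockOverAlloc

// fastCopy copies in 16-byte chunks and may write up to 15 bytes past
// n; it therefore requires slack capacity in both slices.
func fastCopy(dst, src []byte, n int) {
	for i := 0; i < n; i += 16 {
		copy(dst[i:i+16], src[i:i+16])
	}
}

// safeCopy is byte-exact and never touches memory past n.
func safeCopy(dst, src []byte, n int) { copy(dst[:n], src[:n]) }

func copyLiterals(dst, src []byte, n int) {
	if cap(dst) < n+overAlloc || cap(src) < n+overAlloc {
		safeCopy(dst, src, n)
		return
	}
	fastCopy(dst[:cap(dst)], src[:cap(src)], n)
}

func main() {
	src := make([]byte, 5, 5+overAlloc)
	dst := make([]byte, 5, 5+overAlloc)
	copy(src, "hello")
	copyLiterals(dst, src, 5)
	fmt.Println(string(dst[:5]))
}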

View File

@ -711,54 +711,49 @@ sequenceDecs_decode_bmi2_fill_2_end:
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
CMPQ 96(CX), $0x00 CMPQ 96(CX), $0x00
JZ sequenceDecs_decode_bmi2_skip_update JZ sequenceDecs_decode_bmi2_skip_update
LEAQ (SI)(DI*1), R14
// Update Literal Length State ADDQ R8, R14
MOVBQZX SI, R14 MOVBQZX R14, R14
MOVQ $0x00001010, CX
BEXTRQ CX, SI, SI
LEAQ (DX)(R14*1), CX LEAQ (DX)(R14*1), CX
MOVQ AX, R15 MOVQ AX, R15
MOVQ CX, DX MOVQ CX, DX
ROLQ CL, R15 ROLQ CL, R15
BZHIQ R14, R15, R15 BZHIQ R14, R15, R15
ADDQ R15, SI
// Load ctx.llTable // Update Offset State
BZHIQ R8, R15, CX
SHRXQ R8, R15, R15
MOVQ $0x00001010, R14
BEXTRQ R14, R8, R8
ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
MOVQ (CX), CX MOVQ 48(CX), CX
MOVQ (CX)(SI*8), SI MOVQ (CX)(R8*8), R8
// Update Match Length State // Update Match Length State
MOVBQZX DI, R14 BZHIQ DI, R15, CX
MOVQ $0x00001010, CX SHRXQ DI, R15, R15
BEXTRQ CX, DI, DI MOVQ $0x00001010, R14
LEAQ (DX)(R14*1), CX BEXTRQ R14, DI, DI
MOVQ AX, R15 ADDQ CX, DI
MOVQ CX, DX
ROLQ CL, R15
BZHIQ R14, R15, R15
ADDQ R15, DI
// Load ctx.mlTable // Load ctx.mlTable
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
MOVQ 24(CX), CX MOVQ 24(CX), CX
MOVQ (CX)(DI*8), DI MOVQ (CX)(DI*8), DI
// Update Offset State // Update Literal Length State
MOVBQZX R8, R14 BZHIQ SI, R15, CX
MOVQ $0x00001010, CX MOVQ $0x00001010, R14
BEXTRQ CX, R8, R8 BEXTRQ R14, SI, SI
LEAQ (DX)(R14*1), CX ADDQ CX, SI
MOVQ AX, R15
MOVQ CX, DX
ROLQ CL, R15
BZHIQ R14, R15, R15
ADDQ R15, R8
// Load ctx.ofTable // Load ctx.llTable
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
MOVQ 48(CX), CX MOVQ (CX), CX
MOVQ (CX)(R8*8), R8 MOVQ (CX)(SI*8), SI
sequenceDecs_decode_bmi2_skip_update: sequenceDecs_decode_bmi2_skip_update:
// Adjust offset // Adjust offset
@ -971,54 +966,49 @@ sequenceDecs_decode_56_bmi2_fill_end:
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
CMPQ 96(CX), $0x00 CMPQ 96(CX), $0x00
JZ sequenceDecs_decode_56_bmi2_skip_update JZ sequenceDecs_decode_56_bmi2_skip_update
LEAQ (SI)(DI*1), R14
// Update Literal Length State ADDQ R8, R14
MOVBQZX SI, R14 MOVBQZX R14, R14
MOVQ $0x00001010, CX
BEXTRQ CX, SI, SI
LEAQ (DX)(R14*1), CX LEAQ (DX)(R14*1), CX
MOVQ AX, R15 MOVQ AX, R15
MOVQ CX, DX MOVQ CX, DX
ROLQ CL, R15 ROLQ CL, R15
BZHIQ R14, R15, R15 BZHIQ R14, R15, R15
ADDQ R15, SI
// Load ctx.llTable // Update Offset State
BZHIQ R8, R15, CX
SHRXQ R8, R15, R15
MOVQ $0x00001010, R14
BEXTRQ R14, R8, R8
ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
MOVQ (CX), CX MOVQ 48(CX), CX
MOVQ (CX)(SI*8), SI MOVQ (CX)(R8*8), R8
// Update Match Length State // Update Match Length State
MOVBQZX DI, R14 BZHIQ DI, R15, CX
MOVQ $0x00001010, CX SHRXQ DI, R15, R15
BEXTRQ CX, DI, DI MOVQ $0x00001010, R14
LEAQ (DX)(R14*1), CX BEXTRQ R14, DI, DI
MOVQ AX, R15 ADDQ CX, DI
MOVQ CX, DX
ROLQ CL, R15
BZHIQ R14, R15, R15
ADDQ R15, DI
// Load ctx.mlTable // Load ctx.mlTable
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
MOVQ 24(CX), CX MOVQ 24(CX), CX
MOVQ (CX)(DI*8), DI MOVQ (CX)(DI*8), DI
// Update Offset State // Update Literal Length State
MOVBQZX R8, R14 BZHIQ SI, R15, CX
MOVQ $0x00001010, CX MOVQ $0x00001010, R14
BEXTRQ CX, R8, R8 BEXTRQ R14, SI, SI
LEAQ (DX)(R14*1), CX ADDQ CX, SI
MOVQ AX, R15
MOVQ CX, DX
ROLQ CL, R15
BZHIQ R14, R15, R15
ADDQ R15, R8
// Load ctx.ofTable // Load ctx.llTable
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
MOVQ 48(CX), CX MOVQ (CX), CX
MOVQ (CX)(R8*8), R8 MOVQ (CX)(SI*8), SI
sequenceDecs_decode_56_bmi2_skip_update: sequenceDecs_decode_56_bmi2_skip_update:
// Adjust offset // Adjust offset
@ -1162,6 +1152,228 @@ TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
// outBase += outPosition // outBase += outPosition
ADDQ DI, BX ADDQ DI, BX
main_loop:
MOVQ (AX), R11
MOVQ 16(AX), R12
MOVQ 8(AX), R13
// Copy literals
TESTQ R11, R11
JZ check_offset
XORQ R14, R14
copy_1:
MOVUPS (SI)(R14*1), X0
MOVUPS X0, (BX)(R14*1)
ADDQ $0x10, R14
CMPQ R14, R11
JB copy_1
ADDQ R11, SI
ADDQ R11, BX
ADDQ R11, DI
// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
check_offset:
LEAQ (DI)(R10*1), R11
CMPQ R12, R11
JG error_match_off_too_big
CMPQ R12, R8
JG error_match_off_too_big
// Copy match from history
MOVQ R12, R11
SUBQ DI, R11
JLS copy_match
MOVQ R9, R14
SUBQ R11, R14
CMPQ R13, R11
JGE copy_all_from_history
XORQ R11, R11
TESTQ $0x00000001, R13
JZ copy_4_word
MOVB (R14)(R11*1), R12
MOVB R12, (BX)(R11*1)
ADDQ $0x01, R11
copy_4_word:
TESTQ $0x00000002, R13
JZ copy_4_dword
MOVW (R14)(R11*1), R12
MOVW R12, (BX)(R11*1)
ADDQ $0x02, R11
copy_4_dword:
TESTQ $0x00000004, R13
JZ copy_4_qword
MOVL (R14)(R11*1), R12
MOVL R12, (BX)(R11*1)
ADDQ $0x04, R11
copy_4_qword:
TESTQ $0x00000008, R13
JZ copy_4_test
MOVQ (R14)(R11*1), R12
MOVQ R12, (BX)(R11*1)
ADDQ $0x08, R11
JMP copy_4_test
copy_4:
MOVUPS (R14)(R11*1), X0
MOVUPS X0, (BX)(R11*1)
ADDQ $0x10, R11
copy_4_test:
CMPQ R11, R13
JB copy_4
ADDQ R13, DI
ADDQ R13, BX
ADDQ $0x18, AX
INCQ DX
CMPQ DX, CX
JB main_loop
JMP loop_finished
copy_all_from_history:
XORQ R15, R15
TESTQ $0x00000001, R11
JZ copy_5_word
MOVB (R14)(R15*1), BP
MOVB BP, (BX)(R15*1)
ADDQ $0x01, R15
copy_5_word:
TESTQ $0x00000002, R11
JZ copy_5_dword
MOVW (R14)(R15*1), BP
MOVW BP, (BX)(R15*1)
ADDQ $0x02, R15
copy_5_dword:
TESTQ $0x00000004, R11
JZ copy_5_qword
MOVL (R14)(R15*1), BP
MOVL BP, (BX)(R15*1)
ADDQ $0x04, R15
copy_5_qword:
TESTQ $0x00000008, R11
JZ copy_5_test
MOVQ (R14)(R15*1), BP
MOVQ BP, (BX)(R15*1)
ADDQ $0x08, R15
JMP copy_5_test
copy_5:
MOVUPS (R14)(R15*1), X0
MOVUPS X0, (BX)(R15*1)
ADDQ $0x10, R15
copy_5_test:
CMPQ R15, R11
JB copy_5
ADDQ R11, BX
ADDQ R11, DI
SUBQ R11, R13
// Copy match from the current buffer
copy_match:
TESTQ R13, R13
JZ handle_loop
MOVQ BX, R11
SUBQ R12, R11
// ml <= mo
CMPQ R13, R12
JA copy_overlapping_match
// Copy non-overlapping match
ADDQ R13, DI
MOVQ BX, R12
ADDQ R13, BX
copy_2:
MOVUPS (R11), X0
MOVUPS X0, (R12)
ADDQ $0x10, R11
ADDQ $0x10, R12
SUBQ $0x10, R13
JHI copy_2
JMP handle_loop
// Copy overlapping match
copy_overlapping_match:
ADDQ R13, DI
copy_slow_3:
MOVB (R11), R12
MOVB R12, (BX)
INCQ R11
INCQ BX
DECQ R13
JNZ copy_slow_3
handle_loop:
ADDQ $0x18, AX
INCQ DX
CMPQ DX, CX
JB main_loop
loop_finished:
// Return value
MOVB $0x01, ret+8(FP)
// Update the context
MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX)
MOVQ DI, 104(AX)
MOVQ 80(AX), CX
SUBQ CX, SI
MOVQ SI, 112(AX)
RET
error_match_off_too_big:
// Return value
MOVB $0x00, ret+8(FP)
// Update the context
MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX)
MOVQ DI, 104(AX)
MOVQ 80(AX), CX
SUBQ CX, SI
MOVQ SI, 112(AX)
RET
empty_seqs:
// Return value
MOVB $0x01, ret+8(FP)
RET
// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
// Requires: SSE
TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9
MOVQ ctx+0(FP), R10
MOVQ 8(R10), CX
TESTQ CX, CX
JZ empty_seqs
MOVQ (R10), AX
MOVQ 24(R10), DX
MOVQ 32(R10), BX
MOVQ 80(R10), SI
MOVQ 104(R10), DI
MOVQ 120(R10), R8
MOVQ 56(R10), R9
MOVQ 64(R10), R10
ADDQ R10, R9
// seqsBase += 24 * seqIndex
LEAQ (DX)(DX*2), R11
SHLQ $0x03, R11
ADDQ R11, AX
// outBase += outPosition
ADDQ DI, BX
main_loop: main_loop:
MOVQ (AX), R11 MOVQ (AX), R11
MOVQ 16(AX), R12 MOVQ 16(AX), R12
@ -1327,16 +1539,44 @@ copy_match:
// Copy non-overlapping match // Copy non-overlapping match
ADDQ R13, DI ADDQ R13, DI
MOVQ BX, R12 XORQ R12, R12
ADDQ R13, BX TESTQ $0x00000001, R13
JZ copy_2_word
MOVB (R11)(R12*1), R14
MOVB R14, (BX)(R12*1)
ADDQ $0x01, R12
copy_2_word:
TESTQ $0x00000002, R13
JZ copy_2_dword
MOVW (R11)(R12*1), R14
MOVW R14, (BX)(R12*1)
ADDQ $0x02, R12
copy_2_dword:
TESTQ $0x00000004, R13
JZ copy_2_qword
MOVL (R11)(R12*1), R14
MOVL R14, (BX)(R12*1)
ADDQ $0x04, R12
copy_2_qword:
TESTQ $0x00000008, R13
JZ copy_2_test
MOVQ (R11)(R12*1), R14
MOVQ R14, (BX)(R12*1)
ADDQ $0x08, R12
JMP copy_2_test
copy_2: copy_2:
MOVUPS (R11), X0 MOVUPS (R11)(R12*1), X0
MOVUPS X0, (R12) MOVUPS X0, (BX)(R12*1)
ADDQ $0x10, R11
ADDQ $0x10, R12 ADDQ $0x10, R12
SUBQ $0x10, R13
JHI copy_2 copy_2_test:
CMPQ R12, R13
JB copy_2
ADDQ R13, BX
JMP handle_loop JMP handle_loop
// Copy overlapping match // Copy overlapping match
@ -1673,40 +1913,11 @@ sequenceDecs_decodeSync_amd64_match_len_ofs_ok:
TESTQ AX, AX TESTQ AX, AX
JZ check_offset JZ check_offset
XORQ R14, R14 XORQ R14, R14
TESTQ $0x00000001, AX
JZ copy_1_word
MOVB (R11)(R14*1), R15
MOVB R15, (R10)(R14*1)
ADDQ $0x01, R14
copy_1_word:
TESTQ $0x00000002, AX
JZ copy_1_dword
MOVW (R11)(R14*1), R15
MOVW R15, (R10)(R14*1)
ADDQ $0x02, R14
copy_1_dword:
TESTQ $0x00000004, AX
JZ copy_1_qword
MOVL (R11)(R14*1), R15
MOVL R15, (R10)(R14*1)
ADDQ $0x04, R14
copy_1_qword:
TESTQ $0x00000008, AX
JZ copy_1_test
MOVQ (R11)(R14*1), R15
MOVQ R15, (R10)(R14*1)
ADDQ $0x08, R14
JMP copy_1_test
copy_1: copy_1:
MOVUPS (R11)(R14*1), X0 MOVUPS (R11)(R14*1), X0
MOVUPS X0, (R10)(R14*1) MOVUPS X0, (R10)(R14*1)
ADDQ $0x10, R14 ADDQ $0x10, R14
copy_1_test:
CMPQ R14, AX CMPQ R14, AX
JB copy_1 JB copy_1
ADDQ AX, R11 ADDQ AX, R11
@ -2050,54 +2261,49 @@ sequenceDecs_decodeSync_bmi2_fill_2_end:
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
CMPQ 96(CX), $0x00 CMPQ 96(CX), $0x00
JZ sequenceDecs_decodeSync_bmi2_skip_update JZ sequenceDecs_decodeSync_bmi2_skip_update
LEAQ (SI)(DI*1), R13
// Update Literal Length State ADDQ R8, R13
MOVBQZX SI, R13 MOVBQZX R13, R13
MOVQ $0x00001010, CX
BEXTRQ CX, SI, SI
LEAQ (DX)(R13*1), CX LEAQ (DX)(R13*1), CX
MOVQ AX, R14 MOVQ AX, R14
MOVQ CX, DX MOVQ CX, DX
ROLQ CL, R14 ROLQ CL, R14
BZHIQ R13, R14, R14 BZHIQ R13, R14, R14
ADDQ R14, SI
// Load ctx.llTable // Update Offset State
BZHIQ R8, R14, CX
SHRXQ R8, R14, R14
MOVQ $0x00001010, R13
BEXTRQ R13, R8, R8
ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
MOVQ (CX), CX MOVQ 48(CX), CX
MOVQ (CX)(SI*8), SI MOVQ (CX)(R8*8), R8
// Update Match Length State // Update Match Length State
MOVBQZX DI, R13 BZHIQ DI, R14, CX
MOVQ $0x00001010, CX SHRXQ DI, R14, R14
BEXTRQ CX, DI, DI MOVQ $0x00001010, R13
LEAQ (DX)(R13*1), CX BEXTRQ R13, DI, DI
MOVQ AX, R14 ADDQ CX, DI
MOVQ CX, DX
ROLQ CL, R14
BZHIQ R13, R14, R14
ADDQ R14, DI
// Load ctx.mlTable // Load ctx.mlTable
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
MOVQ 24(CX), CX MOVQ 24(CX), CX
MOVQ (CX)(DI*8), DI MOVQ (CX)(DI*8), DI
// Update Offset State // Update Literal Length State
MOVBQZX R8, R13 BZHIQ SI, R14, CX
MOVQ $0x00001010, CX MOVQ $0x00001010, R13
BEXTRQ CX, R8, R8 BEXTRQ R13, SI, SI
LEAQ (DX)(R13*1), CX ADDQ CX, SI
MOVQ AX, R14
MOVQ CX, DX
ROLQ CL, R14
BZHIQ R13, R14, R14
ADDQ R14, R8
// Load ctx.ofTable // Load ctx.llTable
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
MOVQ 48(CX), CX MOVQ (CX), CX
MOVQ (CX)(R8*8), R8 MOVQ (CX)(SI*8), SI
sequenceDecs_decodeSync_bmi2_skip_update: sequenceDecs_decodeSync_bmi2_skip_update:
// Adjust offset // Adjust offset
@ -2180,40 +2386,11 @@ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok:
TESTQ CX, CX TESTQ CX, CX
JZ check_offset JZ check_offset
XORQ R14, R14 XORQ R14, R14
TESTQ $0x00000001, CX
JZ copy_1_word
MOVB (R10)(R14*1), R15
MOVB R15, (R9)(R14*1)
ADDQ $0x01, R14
copy_1_word:
TESTQ $0x00000002, CX
JZ copy_1_dword
MOVW (R10)(R14*1), R15
MOVW R15, (R9)(R14*1)
ADDQ $0x02, R14
copy_1_dword:
TESTQ $0x00000004, CX
JZ copy_1_qword
MOVL (R10)(R14*1), R15
MOVL R15, (R9)(R14*1)
ADDQ $0x04, R14
copy_1_qword:
TESTQ $0x00000008, CX
JZ copy_1_test
MOVQ (R10)(R14*1), R15
MOVQ R15, (R9)(R14*1)
ADDQ $0x08, R14
JMP copy_1_test
copy_1: copy_1:
MOVUPS (R10)(R14*1), X0 MOVUPS (R10)(R14*1), X0
MOVUPS X0, (R9)(R14*1) MOVUPS X0, (R9)(R14*1)
ADDQ $0x10, R14 ADDQ $0x10, R14
copy_1_test:
CMPQ R14, CX CMPQ R14, CX
JB copy_1 JB copy_1
ADDQ CX, R10 ADDQ CX, R10
@ -3114,54 +3291,49 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
CMPQ 96(CX), $0x00 CMPQ 96(CX), $0x00
JZ sequenceDecs_decodeSync_safe_bmi2_skip_update JZ sequenceDecs_decodeSync_safe_bmi2_skip_update
LEAQ (SI)(DI*1), R13
// Update Literal Length State ADDQ R8, R13
MOVBQZX SI, R13 MOVBQZX R13, R13
MOVQ $0x00001010, CX
BEXTRQ CX, SI, SI
LEAQ (DX)(R13*1), CX LEAQ (DX)(R13*1), CX
MOVQ AX, R14 MOVQ AX, R14
MOVQ CX, DX MOVQ CX, DX
ROLQ CL, R14 ROLQ CL, R14
BZHIQ R13, R14, R14 BZHIQ R13, R14, R14
ADDQ R14, SI
// Load ctx.llTable // Update Offset State
BZHIQ R8, R14, CX
SHRXQ R8, R14, R14
MOVQ $0x00001010, R13
BEXTRQ R13, R8, R8
ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
MOVQ (CX), CX MOVQ 48(CX), CX
MOVQ (CX)(SI*8), SI MOVQ (CX)(R8*8), R8
// Update Match Length State // Update Match Length State
MOVBQZX DI, R13 BZHIQ DI, R14, CX
MOVQ $0x00001010, CX SHRXQ DI, R14, R14
BEXTRQ CX, DI, DI MOVQ $0x00001010, R13
LEAQ (DX)(R13*1), CX BEXTRQ R13, DI, DI
MOVQ AX, R14 ADDQ CX, DI
MOVQ CX, DX
ROLQ CL, R14
BZHIQ R13, R14, R14
ADDQ R14, DI
// Load ctx.mlTable // Load ctx.mlTable
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
MOVQ 24(CX), CX MOVQ 24(CX), CX
MOVQ (CX)(DI*8), DI MOVQ (CX)(DI*8), DI
// Update Offset State // Update Literal Length State
MOVBQZX R8, R13 BZHIQ SI, R14, CX
MOVQ $0x00001010, CX MOVQ $0x00001010, R13
BEXTRQ CX, R8, R8 BEXTRQ R13, SI, SI
LEAQ (DX)(R13*1), CX ADDQ CX, SI
MOVQ AX, R14
MOVQ CX, DX
ROLQ CL, R14
BZHIQ R13, R14, R14
ADDQ R14, R8
// Load ctx.ofTable // Load ctx.llTable
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
MOVQ 48(CX), CX MOVQ (CX), CX
MOVQ (CX)(R8*8), R8 MOVQ (CX)(SI*8), SI
sequenceDecs_decodeSync_safe_bmi2_skip_update: sequenceDecs_decodeSync_safe_bmi2_skip_update:
// Adjust offset // Adjust offset
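The rewritten BMI2 blocks above fold the three per-state bit reads into one: sum the three nbBits counts, pull that many bits with a single ROLQ/BZHIQ, then peel each state's share off with BZHIQ/SHRXQ, offset state first. The deleted Go helper updateAlt did the same thing; a compact restatement of the split:

package main

import "fmt"

// splitBits mirrors the combined update: read ll+ml+of bits in one go,
// then slice the word into per-state low-bit groups, offset-first as in
// the bitstream layout.
func splitBits(bits uint32, llBits, mlBits, ofBits uint8) (ll, ml, of uint16) {
	mask := func(n uint8) uint16 { return uint16(1)<<n - 1 }
	ll = uint16(bits>>(ofBits+mlBits)) & mask(llBits)
	ml = uint16(bits>>ofBits) & mask(mlBits)
	of = uint16(bits) & mask(ofBits)
	return
}

func main() {
	// 3+2+4 = 9 bits read at once; the offset state owns the lowest 4.
	fmt.Println(splitBits(0b101_10_1111, 3, 2, 4))
}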

View File

@ -18,7 +18,14 @@ const ZipMethodWinZip = 93
// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT // See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT
const ZipMethodPKWare = 20 const ZipMethodPKWare = 20
var zipReaderPool sync.Pool // zipReaderPool is the default reader pool.
var zipReaderPool = sync.Pool{New: func() interface{} {
z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1))
if err != nil {
panic(err)
}
return z
}}
// newZipReader creates a pooled zip decompressor. // newZipReader creates a pooled zip decompressor.
func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser {
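The pool above now builds decoders lazily with conservative defaults (lowmem, 128MiB max window, one goroutine). Wiring the zstd method into archive/zip then looks roughly like this, using ZipMethodWinZip and ZipDecompressor as I understand the package's public hooks:

package main

import (
	"archive/zip"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	f, err := os.Open("archive.zip") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()
	st, _ := f.Stat()
	zr, err := zip.NewReader(f, st.Size())
	if err != nil {
		panic(err)
	}
	// Route method 93 (WinZip zstd) through the pooled decompressor.
	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
	for _, file := range zr.File {
		_ = file // read with file.Open() as usual
	}
}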

View File

@ -110,17 +110,6 @@ func printf(format string, a ...interface{}) {
} }
} }
// matchLenFast does matching, but will not match the last up to 7 bytes.
func matchLenFast(a, b []byte) int {
endI := len(a) & (math.MaxInt32 - 7)
for i := 0; i < endI; i += 8 {
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
return i + bits.TrailingZeros64(diff)>>3
}
}
return endI
}
// matchLen returns the maximum length. // matchLen returns the maximum length.
// a must be the shortest of the two. // a must be the shortest of the two.
// The function also returns whether all bytes matched. // The function also returns whether all bytes matched.
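The removed matchLenFast compared eight bytes at a time and located the first differing byte from the trailing-zero count of the XOR. A self-contained version of that trick, with an explicit little-endian load64:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

func load64(b []byte, i int) uint64 {
	return binary.LittleEndian.Uint64(b[i:])
}

// matchLenFast matches 8 bytes at a time but ignores the last up to 7
// bytes, like the helper deleted above.
func matchLenFast(a, b []byte) int {
	endI := len(a) &^ 7
	for i := 0; i < endI; i += 8 {
		if diff := load64(a, i) ^ load64(b, i); diff != 0 {
			// Whole low bytes are equal, so tz/8 is the match length.
			return i + bits.TrailingZeros64(diff)>>3
		}
	}
	return endI
}

func main() {
	fmt.Println(matchLenFast([]byte("abcdefgh12"), []byte("abcdefgX34"))) // 7
}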

View File

@ -53,7 +53,7 @@ func relError(file, path string) error {
// LookPath instead returns an error. // LookPath instead returns an error.
func LookPath(file string) (string, error) { func LookPath(file string) (string, error) {
path, err := exec.LookPath(file) path, err := exec.LookPath(file)
if err != nil { if err != nil && !isGo119ErrDot(err) {
return "", err return "", err
} }
if filepath.Base(file) == file && !filepath.IsAbs(path) { if filepath.Base(file) == file && !filepath.IsAbs(path) {

12
vendor/golang.org/x/sys/execabs/execabs_go118.go generated vendored Normal file
View File

@ -0,0 +1,12 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.19
// +build !go1.19
package execabs
func isGo119ErrDot(err error) bool {
return false
}

15
vendor/golang.org/x/sys/execabs/execabs_go119.go generated vendored Normal file
View File

@ -0,0 +1,15 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.19
// +build go1.19
package execabs
import "strings"
func isGo119ErrDot(err error) bool {
// TODO: return errors.Is(err, exec.ErrDot)
return strings.Contains(err.Error(), "current directory")
}
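Since go1.19, exec.LookPath reports resolution via the current directory with exec.ErrDot; the string match above is a stopgap the TODO already flags. A sketch of the direct check, assuming a go1.19 toolchain and a hypothetical binary name:

//go:build go1.19

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	_, err := exec.LookPath("some-tool") // hypothetical binary name
	if errors.Is(err, exec.ErrDot) {
		fmt.Println("resolved via the current directory; treat as unsafe")
	} else if err != nil {
		fmt.Println("lookup failed:", err)
	}
}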

View File

@ -113,5 +113,6 @@ func (tv *Timeval) Nano() int64 {
// use is a no-op, but the compiler cannot see that it is. // use is a no-op, but the compiler cannot see that it is.
// Calling use(p) ensures that p is kept live until that point. // Calling use(p) ensures that p is kept live until that point.
//
//go:noescape //go:noescape
func use(p unsafe.Pointer) func use(p unsafe.Pointer)

View File

@ -115,6 +115,7 @@ func Write(fd int, p []byte) (n int, err error) {
var ioSync int64 var ioSync int64
//sys fd2path(fd int, buf []byte) (err error) //sys fd2path(fd int, buf []byte) (err error)
func Fd2path(fd int) (path string, err error) { func Fd2path(fd int) (path string, err error) {
var buf [512]byte var buf [512]byte
@ -126,6 +127,7 @@ func Fd2path(fd int) (path string, err error) {
} }
//sys pipe(p *[2]int32) (err error) //sys pipe(p *[2]int32) (err error)
func Pipe(p []int) (err error) { func Pipe(p []int) (err error) {
if len(p) != 2 { if len(p) != 2 {
return syscall.ErrorString("bad arg in system call") return syscall.ErrorString("bad arg in system call")
@ -180,6 +182,7 @@ func (w Waitmsg) ExitStatus() int {
} }
//sys await(s []byte) (n int, err error) //sys await(s []byte) (n int, err error)
func Await(w *Waitmsg) (err error) { func Await(w *Waitmsg) (err error) {
var buf [512]byte var buf [512]byte
var f [5][]byte var f [5][]byte
@ -301,42 +304,49 @@ func Getgroups() (gids []int, err error) {
} }
//sys open(path string, mode int) (fd int, err error) //sys open(path string, mode int) (fd int, err error)
func Open(path string, mode int) (fd int, err error) { func Open(path string, mode int) (fd int, err error) {
fixwd() fixwd()
return open(path, mode) return open(path, mode)
} }
//sys create(path string, mode int, perm uint32) (fd int, err error) //sys create(path string, mode int, perm uint32) (fd int, err error)
func Create(path string, mode int, perm uint32) (fd int, err error) { func Create(path string, mode int, perm uint32) (fd int, err error) {
fixwd() fixwd()
return create(path, mode, perm) return create(path, mode, perm)
} }
//sys remove(path string) (err error) //sys remove(path string) (err error)
func Remove(path string) error { func Remove(path string) error {
fixwd() fixwd()
return remove(path) return remove(path)
} }
//sys stat(path string, edir []byte) (n int, err error) //sys stat(path string, edir []byte) (n int, err error)
func Stat(path string, edir []byte) (n int, err error) { func Stat(path string, edir []byte) (n int, err error) {
fixwd() fixwd()
return stat(path, edir) return stat(path, edir)
} }
//sys bind(name string, old string, flag int) (err error) //sys bind(name string, old string, flag int) (err error)
func Bind(name string, old string, flag int) (err error) { func Bind(name string, old string, flag int) (err error) {
fixwd() fixwd()
return bind(name, old, flag) return bind(name, old, flag)
} }
//sys mount(fd int, afd int, old string, flag int, aname string) (err error) //sys mount(fd int, afd int, old string, flag int, aname string) (err error)
func Mount(fd int, afd int, old string, flag int, aname string) (err error) { func Mount(fd int, afd int, old string, flag int, aname string) (err error) {
fixwd() fixwd()
return mount(fd, afd, old, flag, aname) return mount(fd, afd, old, flag, aname)
} }
//sys wstat(path string, edir []byte) (err error) //sys wstat(path string, edir []byte) (err error)
func Wstat(path string, edir []byte) (err error) { func Wstat(path string, edir []byte) (err error) {
fixwd() fixwd()
return wstat(path, edir) return wstat(path, edir)

View File

@ -8,7 +8,6 @@
package unix package unix
import ( import (
"bytes"
"unsafe" "unsafe"
) )
@ -45,13 +44,7 @@ func NewIfreq(name string) (*Ifreq, error) {
// Name returns the interface name associated with the Ifreq. // Name returns the interface name associated with the Ifreq.
func (ifr *Ifreq) Name() string { func (ifr *Ifreq) Name() string {
// BytePtrToString requires a NULL terminator or the program may crash. If return ByteSliceToString(ifr.raw.Ifrn[:])
// one is not present, just return the empty string.
if !bytes.Contains(ifr.raw.Ifrn[:], []byte{0x00}) {
return ""
}
return BytePtrToString(&ifr.raw.Ifrn[0])
} }
// According to netdevice(7), only AF_INET addresses are returned for numerous // According to netdevice(7), only AF_INET addresses are returned for numerous
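The simplification works because ByteSliceToString already covers both cases the old code special-cased: it returns the bytes up to the first NUL, or the whole slice when no terminator is present. A standalone equivalent:

package main

import (
	"bytes"
	"fmt"
)

// byteSliceToString mirrors unix.ByteSliceToString: text up to the
// first NUL, or the whole slice when no terminator exists.
func byteSliceToString(s []byte) string {
	if i := bytes.IndexByte(s, 0); i != -1 {
		s = s[:i]
	}
	return string(s)
}

func main() {
	fmt.Println(byteSliceToString([]byte{'e', 't', 'h', '0', 0, 0}))
	fmt.Println(byteSliceToString([]byte("no-terminator")))
}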

View File

@ -37,6 +37,7 @@ func Creat(path string, mode uint32) (fd int, err error) {
} }
//sys utimes(path string, times *[2]Timeval) (err error) //sys utimes(path string, times *[2]Timeval) (err error)
func Utimes(path string, tv []Timeval) error { func Utimes(path string, tv []Timeval) error {
if len(tv) != 2 { if len(tv) != 2 {
return EINVAL return EINVAL
@ -45,6 +46,7 @@ func Utimes(path string, tv []Timeval) error {
} }
//sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error)
func UtimesNano(path string, ts []Timespec) error { func UtimesNano(path string, ts []Timespec) error {
if len(ts) != 2 { if len(ts) != 2 {
return EINVAL return EINVAL
@ -300,11 +302,13 @@ func direntNamlen(buf []byte) (uint64, bool) {
} }
//sys getdirent(fd int, buf []byte) (n int, err error) //sys getdirent(fd int, buf []byte) (n int, err error)
func Getdents(fd int, buf []byte) (n int, err error) { func Getdents(fd int, buf []byte) (n int, err error) {
return getdirent(fd, buf) return getdirent(fd, buf)
} }
//sys wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error) //sys wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error)
func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) {
var status _C_int var status _C_int
var r Pid_t var r Pid_t
@ -372,6 +376,7 @@ func (w WaitStatus) TrapCause() int { return -1 }
//sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys fcntl(fd int, cmd int, arg int) (val int, err error)
//sys fsyncRange(fd int, how int, start int64, length int64) (err error) = fsync_range //sys fsyncRange(fd int, how int, start int64, length int64) (err error) = fsync_range
func Fsync(fd int) error { func Fsync(fd int) error {
return fsyncRange(fd, O_SYNC, 0, 0) return fsyncRange(fd, O_SYNC, 0, 0)
} }
@ -536,6 +541,7 @@ func Poll(fds []PollFd, timeout int) (n int, err error) {
//sys Getsystemcfg(label int) (n uint64) //sys Getsystemcfg(label int) (n uint64)
//sys umount(target string) (err error) //sys umount(target string) (err error)
func Unmount(target string, flags int) (err error) { func Unmount(target string, flags int) (err error) {
if flags != 0 { if flags != 0 {
// AIX doesn't have any flags for umount. // AIX doesn't have any flags for umount.

View File

@ -504,6 +504,7 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) {
//sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkdirat(dirfd int, path string, mode uint32) (err error)
//sys Mkfifo(path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error)
//sys Mknod(path string, mode uint32, dev int) (err error) //sys Mknod(path string, mode uint32, dev int) (err error)
//sys Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error)
//sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Open(path string, mode int, perm uint32) (fd int, err error)
//sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error)
//sys Pathconf(path string, name int) (val int, err error) //sys Pathconf(path string, name int) (val int, err error)
@ -572,7 +573,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) {
// Nfssvc // Nfssvc
// Getfh // Getfh
// Quotactl // Quotactl
// Mount
// Csops // Csops
// Waitid // Waitid
// Add_profil // Add_profil

View File

@ -125,11 +125,13 @@ func Pipe2(p []int, flags int) (err error) {
} }
//sys extpread(fd int, p []byte, flags int, offset int64) (n int, err error) //sys extpread(fd int, p []byte, flags int, offset int64) (n int, err error)
func pread(fd int, p []byte, offset int64) (n int, err error) { func pread(fd int, p []byte, offset int64) (n int, err error) {
return extpread(fd, p, 0, offset) return extpread(fd, p, 0, offset)
} }
//sys extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error) //sys extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error)
func pwrite(fd int, p []byte, offset int64) (n int, err error) { func pwrite(fd int, p []byte, offset int64) (n int, err error) {
return extpwrite(fd, p, 0, offset) return extpwrite(fd, p, 0, offset)
} }

View File

@ -81,6 +81,7 @@ func Pipe(p []int) (err error) {
} }
//sysnb pipe2(p *[2]_C_int, flags int) (err error) //sysnb pipe2(p *[2]_C_int, flags int) (err error)
func Pipe2(p []int, flags int) error { func Pipe2(p []int, flags int) error {
if len(p) != 2 { if len(p) != 2 {
return EINVAL return EINVAL
@ -95,6 +96,7 @@ func Pipe2(p []int, flags int) error {
} }
//sys Getdents(fd int, buf []byte) (n int, err error) //sys Getdents(fd int, buf []byte) (n int, err error)
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
n, err = Getdents(fd, buf) n, err = Getdents(fd, buf)
if err != nil || basep == nil { if err != nil || basep == nil {

View File

@ -5,7 +5,7 @@
// +build 386,linux // +build 386,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT. // Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/_const.go // cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 _const.go
package unix package unix

View File

@ -5,7 +5,7 @@
// +build amd64,linux // +build amd64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT. // Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/_const.go // cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 _const.go
package unix package unix

View File

@ -5,7 +5,7 @@
// +build arm,linux // +build arm,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT. // Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go // cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix package unix

View File

@ -5,7 +5,7 @@
// +build arm64,linux // +build arm64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT. // Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go // cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go
package unix package unix

View File

@ -5,7 +5,7 @@
// +build loong64,linux // +build loong64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT. // Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go // cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix package unix

View File

@ -5,7 +5,7 @@
// +build mips,linux // +build mips,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT. // Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go // cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix package unix

View File

@ -5,7 +5,7 @@
// +build mips64,linux // +build mips64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT. // Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go // cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix package unix

View File

@ -5,7 +5,7 @@
// +build mips64le,linux // +build mips64le,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT. // Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go // cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix package unix

View File

@ -5,7 +5,7 @@
// +build mipsle,linux // +build mipsle,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT. // Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go // cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix package unix

View File

@ -5,7 +5,7 @@
// +build ppc,linux // +build ppc,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT. // Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go // cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix package unix

View File

@ -5,7 +5,7 @@
// +build ppc64,linux // +build ppc64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT. // Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go // cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix package unix

View File

@ -5,7 +5,7 @@
// +build ppc64le,linux // +build ppc64le,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT. // Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go // cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix package unix

View File

@ -5,7 +5,7 @@
// +build riscv64,linux // +build riscv64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT. // Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go // cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix package unix

View File

@ -5,7 +5,7 @@
// +build s390x,linux // +build s390x,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT. // Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go // cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go
package unix package unix

View File

@ -5,7 +5,7 @@
// +build sparc64,linux // +build sparc64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT. // Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go // cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix package unix

View File

@ -1643,6 +1643,30 @@ var libc_mknod_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(fsType)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(dir)
if err != nil {
return
}
_, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_mount_trampoline_addr uintptr
//go:cgo_import_dynamic libc_mount mount "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Open(path string, mode int, perm uint32) (fd int, err error) { func Open(path string, mode int, perm uint32) (fd int, err error) {
var _p0 *byte var _p0 *byte
_p0, err = BytePtrFromString(path) _p0, err = BytePtrFromString(path)
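The generated wrapper above exposes Mount on darwin for the first time. A hedged usage sketch (flags and paths illustrative; expect a permissions error when run casually):

//go:build darwin

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Illustrative only: update-mount devfs at /dev. Requires darwin
	// and privileges, so a plain run will likely report EPERM.
	err := unix.Mount("devfs", "/dev", unix.MNT_UPDATE, nil)
	fmt.Println(err)
}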

View File

@ -600,6 +600,12 @@ TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB)
TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mount(SB)
GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8
DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB)
TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_open(SB) JMP libc_open(SB)

View File

@ -1643,6 +1643,30 @@ var libc_mknod_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(fsType)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(dir)
if err != nil {
return
}
_, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_mount_trampoline_addr uintptr
//go:cgo_import_dynamic libc_mount mount "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Open(path string, mode int, perm uint32) (fd int, err error) { func Open(path string, mode int, perm uint32) (fd int, err error) {
var _p0 *byte var _p0 *byte
_p0, err = BytePtrFromString(path) _p0, err = BytePtrFromString(path)

View File

@ -600,6 +600,12 @@ TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB)
TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mount(SB)
GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8
DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB)
TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_open(SB) JMP libc_open(SB)

View File

@ -1,4 +1,4 @@
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/linux/types.go | go run mkpost.go // cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT. // Code generated by the command above; see README.md. DO NOT EDIT.
//go:build 386 && linux //go:build 386 && linux

Some files were not shown because too many files have changed in this diff.