Compare commits


98 Commits

Author SHA1 Message Date
Daniel J Walsh
2b4097bc13 Bump to v1.2.0
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-09-21 14:26:24 -04:00
Daniel J Walsh
8151b89b81 Merge pull request #1038 from alvistack/master-linux-amd64
Update nix pin with `make nixpkgs`
2020-09-21 13:53:14 -04:00
Daniel J Walsh
cbd7fb7d37 Merge pull request #1045 from containers/dependabot/go_modules/github.com/containers/image/v5-5.6.0
Bump github.com/containers/image/v5 from 5.5.2 to 5.6.0
2020-09-21 13:51:53 -04:00
Miloslav Trmač
77293ff9c4 Merge pull request #1047 from airadier/registry-token-cli-flag
Add --registry-token flags to support Bearer token authentication
2020-09-18 21:32:24 +02:00
Alvaro Iradier
467b462b79 Keep options order in code and add missing bash completions 2020-09-18 20:57:02 +02:00
Alvaro Iradier
242b573f9a Adding periods 2020-09-18 18:11:01 +02:00
Alvaro Iradier
2d5f12b9a6 Add --registry-token tests to utils_tests.go
Signed-off-by: Alvaro Iradier <airadier@gmail.com>
2020-09-18 12:36:54 +02:00
Alvaro Iradier
3c73c0c0cd Add --registry-token flags to support Bearer token authentication
Signed-off-by: Alvaro Iradier <airadier@gmail.com>
2020-09-18 11:42:54 +02:00
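
For illustration, a minimal sketch (not code from this PR; the helper name and token value are made up) of what the new --registry-token / --src-registry-token / --dest-registry-token flags do: the supplied value is copied into the c/image SystemContext and used directly as a Bearer token instead of username/password credentials.

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/types"
)

// newSystemContextWithToken is a hypothetical helper mirroring what the new
// flag does in cmd/skopeo/utils.go: when --registry-token (or the src-/dest-
// prefixed variant) is present, the value is copied straight into the
// SystemContext and later sent as "Authorization: Bearer <token>" rather than
// as username/password credentials.
func newSystemContextWithToken(token string) *types.SystemContext {
	ctx := &types.SystemContext{}
	if token != "" {
		ctx.DockerBearerRegistryToken = token
	}
	return ctx
}

func main() {
	sys := newSystemContextWithToken("eyJhbGciOiJSUzI1NiJ9.example") // hypothetical token
	fmt.Println("bearer token set:", sys.DockerBearerRegistryToken != "")
	// sys would then be passed to c/image calls such as ref.NewImageSource(ctx, sys)
	// or copy.Image(..., &copy.Options{SourceCtx: sys, ...}).
}
```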
Wong Hoi Sing Edison
ec17cfcbf1 Update nix pin with make nixpkgs
Signed-off-by: Wong Hoi Sing Edison <hswong3i@gmail.com>
2020-09-17 09:04:41 +08:00
dependabot-preview[bot]
1d0b1671f8 Bump github.com/containers/image/v5 from 5.5.2 to 5.6.0
Bumps [github.com/containers/image/v5](https://github.com/containers/image) from 5.5.2 to 5.6.0.
- [Release notes](https://github.com/containers/image/releases)
- [Commits](https://github.com/containers/image/compare/v5.5.2...v5.6.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-09-15 06:16:04 -04:00
Daniel J Walsh
bbd800f974 Merge pull request #1042 from rhatdan/codespell
Fix problems found by codespell
2020-09-15 06:14:41 -04:00
Miloslav Trmač
12ab19f5fd Merge pull request #1043 from containers/dependabot/go_modules/github.com/containers/common-0.22.0
Bump github.com/containers/common from 0.21.0 to 0.22.0
2020-09-14 17:51:36 +02:00
dependabot-preview[bot]
05d172a1f5 Bump github.com/containers/common from 0.21.0 to 0.22.0
Bumps [github.com/containers/common](https://github.com/containers/common) from 0.21.0 to 0.22.0.
- [Release notes](https://github.com/containers/common/releases)
- [Commits](https://github.com/containers/common/compare/v0.21.0...v0.22.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2020-09-14 16:43:36 +02:00
Daniel J Walsh
45a9efb37f Merge pull request #1039 from containers/dependabot/go_modules/github.com/containers/storage-1.23.5
Bump github.com/containers/storage from 1.23.4 to 1.23.5
2020-09-11 14:49:44 -04:00
Daniel J Walsh
62bafb102d Fix problems found by codespell
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-09-11 10:19:16 -04:00
dependabot-preview[bot]
4eda1d092d Bump github.com/containers/storage from 1.23.4 to 1.23.5
Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.23.4 to 1.23.5.
- [Release notes](https://github.com/containers/storage/releases)
- [Changelog](https://github.com/containers/storage/blob/master/docs/containers-storage-changes.md)
- [Commits](https://github.com/containers/storage/compare/v1.23.4...v1.23.5)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-09-11 06:18:49 -04:00
Daniel J Walsh
5dd09d76c3 Merge pull request #1033 from containers/dependabot/go_modules/github.com/containers/storage-1.23.4
Bump github.com/containers/storage from 1.23.3 to 1.23.4
2020-09-09 15:57:59 -04:00
Miloslav Trmač
23cb1b7f19 Remove obsolete documentation of (make binary-static)
... which no longer works after #932.

This does not add documentation for the current static build approach,
nor any other place where DISABLE_CGO is documented;
neither is tested by CI, and both are discouraged due to poor integration
with the rest of the system.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2020-09-09 10:14:50 +02:00
dependabot-preview[bot]
c1f984a176 Bump github.com/containers/storage from 1.23.3 to 1.23.4
Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.23.3 to 1.23.4.
- [Release notes](https://github.com/containers/storage/releases)
- [Changelog](https://github.com/containers/storage/blob/master/docs/containers-storage-changes.md)
- [Commits](https://github.com/containers/storage/compare/v1.23.3...v1.23.4)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-09-08 06:05:35 -04:00
Daniel J Walsh
662f9ac8f7 Merge pull request #1031 from containers/dependabot/go_modules/github.com/containers/common-0.21.0
Bump github.com/containers/common from 0.20.3 to 0.21.0
2020-09-02 13:34:33 -04:00
Daniel J Walsh
ae26454014 Merge pull request #1008 from myback/master
fix build in docker container
2020-09-02 12:47:30 -04:00
Daniel J Walsh
5e1d64825c Merge pull request #1026 from alvistack/master-linux-amd64
Update nix pin with `make nixpkgs`
2020-09-02 12:46:36 -04:00
dependabot-preview[bot]
8767e73fe9 Bump github.com/containers/common from 0.20.3 to 0.21.0
Bumps [github.com/containers/common](https://github.com/containers/common) from 0.20.3 to 0.21.0.
- [Release notes](https://github.com/containers/common/releases)
- [Commits](https://github.com/containers/common/compare/v0.20.3...v0.21.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-09-02 12:45:59 -04:00
Wong Hoi Sing Edison
071462199d Update nix pin with make nixpkgs
Signed-off-by: Wong Hoi Sing Edison <hswong3i@gmail.com>
2020-09-01 21:09:48 +08:00
myback
3bb23e355e use base image golang for build 2020-08-31 22:53:25 +07:00
Daniel J Walsh
c4998ebf3f Merge pull request #1027 from containers/dependabot/go_modules/github.com/containers/storage-1.23.2
Bump github.com/containers/storage from 1.23.1 to 1.23.2
2020-08-28 08:28:35 -04:00
Daniel J Walsh
a13b581760 Merge pull request #1028 from jarda-wien/master
Fix skopeo-login docs typo
2020-08-27 05:49:04 -04:00
dependabot-preview[bot]
c8c8d5db78 Bump github.com/containers/storage from 1.23.1 to 1.23.2
Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.23.1 to 1.23.2.
- [Release notes](https://github.com/containers/storage/releases)
- [Changelog](https://github.com/containers/storage/blob/master/docs/containers-storage-changes.md)
- [Commits](https://github.com/containers/storage/compare/v1.23.1...v1.23.2)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-08-27 05:48:03 -04:00
Jaroslav Stepanek
ad3d4aecbb Fix skopeo-login docs typo 2020-08-26 15:57:31 +02:00
Daniel J Walsh
87484a1754 Merge pull request #1023 from containers/dependabot/go_modules/github.com/containers/storage-1.23.1
Bump github.com/containers/storage from 1.23.0 to 1.23.1
2020-08-24 06:31:10 -04:00
Daniel J Walsh
58b9ec9e08 Merge pull request #1024 from containers/dependabot/go_modules/github.com/containers/common-0.20.3
Bump github.com/containers/common from 0.20.2 to 0.20.3
2020-08-24 06:30:50 -04:00
dependabot-preview[bot]
6911642122 Bump github.com/containers/storage from 1.23.0 to 1.23.1
Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.23.0 to 1.23.1.
- [Release notes](https://github.com/containers/storage/releases)
- [Changelog](https://github.com/containers/storage/blob/master/docs/containers-storage-changes.md)
- [Commits](https://github.com/containers/storage/compare/v1.23.0...v1.23.1)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-08-23 06:49:29 -04:00
dependabot-preview[bot]
3ede91cca6 Bump github.com/containers/common from 0.20.2 to 0.20.3
Bumps [github.com/containers/common](https://github.com/containers/common) from 0.20.2 to 0.20.3.
- [Release notes](https://github.com/containers/common/releases)
- [Commits](https://github.com/containers/common/compare/v0.20.2...v0.20.3)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-08-23 06:48:48 -04:00
Daniel J Walsh
5d5756cc83 Merge pull request #1020 from containers/dependabot/go_modules/github.com/containers/image/v5-5.5.2
Bump github.com/containers/image/v5 from 5.5.1 to 5.5.2
2020-08-19 08:54:32 -04:00
Daniel J Walsh
5ad62b9415 Bump github.com/containers/image/v5 from 5.5.1 to 5.5.2
Bumps [github.com/containers/image/v5](https://github.com/containers/image) from 5.5.1 to 5.5.2.
- [Release notes](https://github.com/containers/image/releases)
- [Commits](https://github.com/containers/image/compare/v5.5.1...v5.5.2)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-08-19 08:02:35 -04:00
Daniel J Walsh
88c8c47ce0 Merge pull request #1016 from XiaodongLoong/build-on-mips
-buildmode=pie is not supported for some architectures
2020-08-19 07:59:41 -04:00
Daniel J Walsh
e4f656616c Merge pull request #1017 from containers/dependabot/go_modules/github.com/containers/common-0.20.2
Bump github.com/containers/common from 0.18.0 to 0.20.2
2020-08-19 07:56:07 -04:00
dependabot-preview[bot]
b05933fbc4 Bump github.com/containers/common from 0.18.0 to 0.20.2
Bumps [github.com/containers/common](https://github.com/containers/common) from 0.18.0 to 0.20.2.
- [Release notes](https://github.com/containers/common/releases)
- [Commits](https://github.com/containers/common/compare/v0.18.0...v0.20.2)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-08-18 08:52:41 -04:00
Xiaodong Liu
e5f549099b -buildmode=pie is not supported for some architectures
Signed-off-by: Xiaodong Liu <liuxiaodong@loongson.cn>
2020-08-14 17:52:48 +08:00
Daniel J Walsh
ea10e61f7d Merge pull request #973 from alvistack/master-linux-amd64
Build static binary with `buildGoModule`
2020-08-11 17:42:05 -04:00
Wong Hoi Sing Edison
915f40d12a Build static binary with buildGoModule
Signed-off-by: Wong Hoi Sing Edison <hswong3i@gmail.com>
2020-08-11 14:10:43 +08:00
Daniel J Walsh
0c2c7f4016 Update docs/skopeo.1.md
Co-authored-by: Tom Sweeney <tsweeney@redhat.com>
2020-08-10 09:45:08 +02:00
Miloslav Trmač
135ce43169 Add oci-archive to transport list, and link to the authoritative man page
Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2020-08-10 09:45:08 +02:00
Daniel J Walsh
0f94dbcdb3 Merge pull request #1004 from mtrmac/copy-example
Use an inter-registry copy as the example for (skopeo copy)
2020-08-09 08:51:43 -04:00
Daniel J Walsh
f30bab47e6 Merge pull request #1003 from mtrmac/copy-independent
Add an extra clarification to skopeo-copy(1)
2020-08-09 08:50:29 -04:00
Miloslav Trmač
baeaad61d9 Merge pull request #1002 from QiWang19/common-retry
Use c/common retry package
2020-08-08 08:04:35 +02:00
Miloslav Trmač
c750be0107 Use an inter-registry copy as the example for (skopeo copy)
because that's what users are looking for, instead of using
a containers-storage: source, which might not even work all that
well with the automatic defaults Podman sets up.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2020-08-08 04:22:24 +02:00
Qi Wang
84d051fc01 Use c/common retry package
Use c/common retry package.

Signed-off-by: Qi Wang <qiwan@redhat.com>
2020-08-07 21:17:05 -04:00
Miloslav Trmač
56f8222e12 Add an extra clarification to skopeo-copy(1)
Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2020-08-08 02:24:46 +02:00
Miloslav Trmač
78d2f67016 Merge pull request #1001 from containers/dependabot/go_modules/github.com/containers/storage-1.22.0
Bump github.com/containers/storage from 1.21.2 to 1.22.0
2020-08-08 00:33:00 +02:00
dependabot-preview[bot]
c24363ccda Bump github.com/containers/storage from 1.21.2 to 1.22.0
Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.21.2 to 1.22.0.
- [Release notes](https://github.com/containers/storage/releases)
- [Changelog](https://github.com/containers/storage/blob/master/docs/containers-storage-changes.md)
- [Commits](https://github.com/containers/storage/compare/v1.21.2...v1.22.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-08-07 08:21:01 -04:00
Daniel J Walsh
c052ed7ec8 Merge pull request #992 from rhatdan/inspect
Make InspectOutput an external object
2020-08-03 13:26:18 -04:00
Miloslav Trmač
5e88eb5761 Merge pull request #995 from rhatdan/master
A couple of minor code cleanups.
2020-08-03 15:42:57 +02:00
Daniel J Walsh
4fb724fb7b Make InspectOutput an external object
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-08-02 07:08:48 -04:00
Daniel J Walsh
e23b780072 Fix make clean to actually remove binaries
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-08-02 07:04:51 -04:00
Daniel J Walsh
d9058b3021 Switch containers/libpod->containers/podman
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-08-02 07:04:51 -04:00
Daniel J Walsh
62fd5a76e1 Merge pull request #988 from containers/dependabot/go_modules/github.com/containers/common-0.18.0
Bump github.com/containers/common from 0.16.0 to 0.18.0
2020-07-31 07:34:20 -04:00
Daniel J Walsh
6252c22112 Merge pull request #994 from mrueg/dockerfile.build
Dockerfile.build: Upgrade to Ubuntu 20.04
2020-07-30 18:14:26 -04:00
Manuel Rüger
26e6db1cc7 Dockerfile.build: Upgrade to Ubuntu 20.04
19.10 has been EOL since July 17, 2020

Signed-off-by: Manuel Rüger <manuel@rueg.eu>
2020-07-29 15:10:52 +02:00
dependabot-preview[bot]
b7cdcb00ac Bump github.com/containers/common from 0.16.0 to 0.18.0
Bumps [github.com/containers/common](https://github.com/containers/common) from 0.16.0 to 0.18.0.
- [Release notes](https://github.com/containers/common/releases)
- [Commits](https://github.com/containers/common/compare/v0.16.0...v0.18.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-07-23 08:28:23 -04:00
Daniel J Walsh
153f18dc0a Merge pull request #986 from containers/dependabot/go_modules/github.com/containers/storage-1.21.2
Bump github.com/containers/storage from 1.21.1 to 1.21.2
2020-07-23 08:18:54 -04:00
dependabot-preview[bot]
4012d0e30c Bump github.com/containers/storage from 1.21.1 to 1.21.2
Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.21.1 to 1.21.2.
- [Release notes](https://github.com/containers/storage/releases)
- [Changelog](https://github.com/containers/storage/blob/master/docs/containers-storage-changes.md)
- [Commits](https://github.com/containers/storage/compare/v1.21.1...v1.21.2)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-07-22 06:57:31 -04:00
Miloslav Trmač
494d237789 Merge pull request #977 from QiWang19/commands-retry
Retry on skopeo subcommands
2020-07-18 21:55:52 +02:00
Qi Wang
84c53d104a Retry on skopeo subcommands
Applies retry logic to more skopeo subcommands.
Related issue: https://github.com/containers/skopeo/issues/918

Signed-off-by: Qi Wang <qiwan@redhat.com>
2020-07-17 12:04:20 -04:00
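
For illustration, a rough standalone sketch of the wiring pattern these commits use (not code from this PR; the `demo` command and its behavior are made up): a small pflag.FlagSet bound to retry.RetryOptions is created once, added to each cobra subcommand, and the resulting options are passed to retry.RetryIfNecessary in the command's run function. The retryFlags helper mirrors the one shown later in the cmd/skopeo/utils.go diff.

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/common/pkg/retry"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

// retryFlags mirrors the helper added in cmd/skopeo/utils.go: it binds
// --retry-times to a shared retry.RetryOptions value that the subcommand
// later passes to retry.RetryIfNecessary.
func retryFlags() (pflag.FlagSet, *retry.RetryOptions) {
	opts := retry.RetryOptions{}
	fs := pflag.FlagSet{}
	fs.IntVar(&opts.MaxRetry, "retry-times", 0, "the number of times to possibly retry")
	return fs, &opts
}

func main() {
	retryFlagSet, retryOpts := retryFlags()

	cmd := &cobra.Command{
		Use: "demo", // hypothetical subcommand standing in for inspect/copy/delete/sync/...
		RunE: func(cmd *cobra.Command, args []string) error {
			// RetryIfNecessary re-runs the operation up to MaxRetry times when the
			// error looks transient (for example, network errors or HTTP 429), with
			// backoff between attempts; other errors are returned immediately.
			return retry.RetryIfNecessary(context.Background(), func() error {
				fmt.Println("performing the registry operation")
				return nil // stand-in for e.g. a GetManifest or copy.Image call
			}, retryOpts)
		},
	}
	cmd.Flags().AddFlagSet(&retryFlagSet)

	cmd.SetArgs([]string{"--retry-times", "2"})
	if err := cmd.Execute(); err != nil {
		fmt.Println("error:", err)
	}
}
```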
Daniel J Walsh
89fb89a456 Merge pull request #980 from containers/dependabot/go_modules/github.com/containers/common-0.16.0
Bump github.com/containers/common from 0.15.2 to 0.16.0
2020-07-17 07:11:45 -04:00
dependabot-preview[bot]
960b610ff6 Bump github.com/containers/common from 0.15.2 to 0.16.0
Bumps [github.com/containers/common](https://github.com/containers/common) from 0.15.2 to 0.16.0.
- [Release notes](https://github.com/containers/common/releases)
- [Commits](https://github.com/containers/common/compare/v0.15.2...v0.16.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2020-07-17 00:29:35 +02:00
Daniel J Walsh
29eec32795 Merge pull request #976 from containers/dependabot/go_modules/github.com/containers/storage-1.21.1
Bump github.com/containers/storage from 1.21.0 to 1.21.1
2020-07-16 17:16:50 -04:00
Miloslav Trmač
2fa7b998ba Merge pull request #978 from vrothberg/update-x/text
vendor golang.org/x/text@v0.3.3
2020-07-16 19:01:56 +02:00
Valentin Rothberg
ebc438266d vendor golang.org/x/text@v0.3.3
Fixes: CVE-2020-14040
Signed-off-by: Valentin Rothberg <rothberg@redhat.com>
2020-07-16 11:17:54 +02:00
dependabot-preview[bot]
8f5eb45ba6 Bump github.com/containers/storage from 1.21.0 to 1.21.1
Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.21.0 to 1.21.1.
- [Release notes](https://github.com/containers/storage/releases)
- [Changelog](https://github.com/containers/storage/blob/master/docs/containers-storage-changes.md)
- [Commits](https://github.com/containers/storage/compare/v1.21.0...v1.21.1)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-07-14 21:26:24 -04:00
Daniel J Walsh
6284ceb2b6 Merge pull request #974 from containers/dependabot/go_modules/github.com/containers/storage-1.21.0
Bump github.com/containers/storage from 1.20.2 to 1.21.0
2020-07-11 09:29:08 -04:00
Daniel J Walsh
5e2264d2b5 Bump github.com/containers/storage from 1.20.2 to 1.21.0
Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.20.2 to 1.21.0.
- [Release notes](https://github.com/containers/storage/releases)
- [Changelog](https://github.com/containers/storage/blob/master/docs/containers-storage-changes.md)
- [Commits](https://github.com/containers/storage/compare/v1.20.2...v1.21.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-07-11 07:38:43 -04:00
Daniel J Walsh
6e295a2097 Merge pull request #975 from containers/dependabot/go_modules/github.com/hashicorp/go-multierror-1.1.0
Bump github.com/hashicorp/go-multierror from 1.0.0 to 1.1.0
2020-07-11 07:36:58 -04:00
dependabot-preview[bot]
19f9a5c2fa Bump github.com/hashicorp/go-multierror from 1.0.0 to 1.1.0
Bumps [github.com/hashicorp/go-multierror](https://github.com/hashicorp/go-multierror) from 1.0.0 to 1.1.0.
- [Release notes](https://github.com/hashicorp/go-multierror/releases)
- [Commits](https://github.com/hashicorp/go-multierror/compare/v1.0.0...v1.1.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-07-10 06:42:30 -04:00
Daniel J Walsh
f63685f3c8 Merge pull request #971 from containers/dependabot/go_modules/github.com/containers/ocicrypt-1.0.3
Bump github.com/containers/ocicrypt from 1.0.2 to 1.0.3
2020-07-10 06:41:12 -04:00
Miloslav Trmač
dc5f68fe5f Merge pull request #933 from QiWang19/retry
Retry skopeo inspect command
2020-07-09 18:56:51 +02:00
Qi Wang
0858cafffc Retry skopeo inspect command
Enables retrying skopeo inspect. Adds `--retry-times` to set the number of times to retry. Uses exponential backoff with a 1s default initial retry delay.

Signed-off-by: Qi Wang <qiwan@redhat.com>
2020-07-09 11:57:13 -04:00
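
A self-contained sketch of the wrapping pattern this commit introduces (assuming the c/common retry and c/image APIs as vendored here; the registry and image name are hypothetical): each network call is wrapped in a closure handed to retry.RetryIfNecessary together with the options populated from --retry-times.

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/common/pkg/retry"
	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/types"
)

func main() {
	ctx := context.Background()
	// Equivalent of --retry-times 3; the backoff between attempts is handled
	// inside the retry package (exponential, starting from a 1s default,
	// per this commit message).
	retryOpts := &retry.RetryOptions{MaxRetry: 3}

	ref, err := docker.ParseReference("//registry.example.com/skopeo:latest") // hypothetical image
	if err != nil {
		panic(err)
	}

	var src types.ImageSource
	if err := retry.RetryIfNecessary(ctx, func() error {
		src, err = ref.NewImageSource(ctx, &types.SystemContext{})
		return err
	}, retryOpts); err != nil {
		panic(err)
	}
	defer src.Close()

	var rawManifest []byte
	if err := retry.RetryIfNecessary(ctx, func() error {
		rawManifest, _, err = src.GetManifest(ctx, nil)
		return err
	}, retryOpts); err != nil {
		panic(err)
	}
	fmt.Printf("fetched %d-byte manifest\n", len(rawManifest))
}
```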
dependabot-preview[bot]
2e343342d5 Bump github.com/containers/ocicrypt from 1.0.2 to 1.0.3
Bumps [github.com/containers/ocicrypt](https://github.com/containers/ocicrypt) from 1.0.2 to 1.0.3.
- [Release notes](https://github.com/containers/ocicrypt/releases)
- [Commits](https://github.com/containers/ocicrypt/compare/v1.0.2...v1.0.3)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2020-07-09 16:31:10 +02:00
Miloslav Trmač
840c48752e Merge pull request #972 from mtrmac/brew-test
Fix macOS builds in Travis
2020-07-09 16:30:02 +02:00
Miloslav Trmač
0382b01687 Fix macOS builds in Travis
... which are currently failing with
> Error: The `brew link` step did not complete successfully
> The formula built, but is not symlinked into /usr/local
> Could not symlink Frameworks/Python.framework/Headers
> Target /usr/local/Frameworks/Python.framework/Headers
> is a symlink belonging to python@2. You can unlink it:
>   brew unlink python@2

because the Travis-installed machine apparently has quite a few
Homebrew formulae installed with an old version of Homebrew,
including the now-removed python@2, which prevents updates of
python@3.

Remove the obsolete motivation for running (brew update), and replace it
with an equally valid one: the Travis images are simply too old
to be relevant to users.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2020-07-09 02:17:50 +02:00
Miloslav Trmač
ee72e803ec Merge pull request #969 from containers/dependabot/go_modules/github.com/containers/common-0.15.2
Bump github.com/containers/common from 0.15.1 to 0.15.2
2020-07-08 23:19:06 +02:00
dependabot-preview[bot]
142142c040 Bump github.com/containers/common from 0.15.1 to 0.15.2
Bumps [github.com/containers/common](https://github.com/containers/common) from 0.15.1 to 0.15.2.
- [Release notes](https://github.com/containers/common/releases)
- [Commits](https://github.com/containers/common/compare/v0.15.1...v0.15.2)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-07-06 14:53:30 -04:00
Daniel J Walsh
6182aa30b1 Merge pull request #964 from alvistack/master-linux-amd64
nix run -f channel:nixos-20.03
2020-07-02 06:11:50 -04:00
Daniel J Walsh
ec9f8acf00 Merge pull request #967 from containers/dependabot/go_modules/github.com/containers/common-0.15.1
Bump github.com/containers/common from 0.14.3 to 0.15.1
2020-07-02 06:10:46 -04:00
dependabot-preview[bot]
52b3a5bacc Bump github.com/containers/common from 0.14.3 to 0.15.1
Bumps [github.com/containers/common](https://github.com/containers/common) from 0.14.3 to 0.15.1.
- [Release notes](https://github.com/containers/common/releases)
- [Commits](https://github.com/containers/common/compare/v0.14.3...v0.15.1)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-07-01 20:59:28 +02:00
Miloslav Trmač
ac6b871f66 Merge pull request #966 from lumjjb/doc_Change
update enc/dec docs to be consistent with buildah
2020-07-01 20:52:49 +02:00
Brandon Lum
b17fb08f8b update enc/dec docs to be consistent with buildah
Signed-off-by: Brandon Lum <lumjjb@gmail.com>
2020-06-30 21:46:57 +00:00
Wong Hoi Sing Edison
dd2e70e9b7 nix run -f channel:nixos-20.03
Switch from nix `channel:nixpkgs-unstable` to `channel:nixos-20.03` for better stability.

Signed-off-by: Wong Hoi Sing Edison <hswong3i@gmail.com>
2020-06-29 12:07:46 +08:00
Miloslav Trmač
ba8cbf589b Merge pull request #963 from containers/dependabot/go_modules/github.com/containers/common-0.14.3
Bump github.com/containers/common from 0.14.0 to 0.14.3
2020-06-26 20:01:02 +02:00
dependabot-preview[bot]
91dc0f3f4c Bump github.com/containers/common from 0.14.0 to 0.14.3
Bumps [github.com/containers/common](https://github.com/containers/common) from 0.14.0 to 0.14.3.
- [Release notes](https://github.com/containers/common/releases)
- [Commits](https://github.com/containers/common/compare/v0.14.0...v0.14.3)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-06-26 06:06:56 -04:00
Daniel J Walsh
7815c8ac6f Merge pull request #952 from rhatdan/build
Cleanup Dockerfile builds
2020-06-22 11:21:21 -04:00
Miloslav Trmač
233e61cf9a Merge pull request #960 from mtrmac/htpasswd
Run htpasswd from our build-container instead of registry:2
2020-06-22 15:23:03 +02:00
Miloslav Trmač
0e2611d3a6 Run htpasswd from our build-container instead of registry:2
registry:2 no longer contains htpasswd.

Also don't use log_and_run ... >> $file
because that will cause the command to be logged to $file.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2020-06-19 19:44:52 +02:00
Daniel J Walsh
96bd4a0619 Merge pull request #958 from rhatdan/VERSION
Bump to v1.1.0
2020-06-18 15:28:31 -04:00
Daniel J Walsh
6b78619cd1 Merge pull request #932 from alvistack/master-linux-amd64
[nix] Add nix derivation for static builds
2020-06-18 10:24:32 -04:00
Wong Hoi Sing Edison
0f458eec76 Add nix derivation for static builds
Signed-off-by: Wong Hoi Sing Edison <hswong3i@gmail.com>
2020-06-18 20:57:55 +08:00
Daniel J Walsh
6b960ec031 Cleanup Dockerfile builds
Add the `-y` option to yum clean all.
Only delete below /var/cache/dnf so that I can use the
-v /var/cache/dnf:/var/cache/dnf:O option when building
to speed up builds.

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-06-17 17:22:58 -04:00
Daniel J Walsh
fdc58131f8 Move to v1.1.1-dev
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2020-06-17 16:41:59 -04:00
302 changed files with 9849 additions and 9333 deletions

.gitignore
View File

@@ -1,6 +1,7 @@
*.1
/layers-*
/skopeo
result
# ignore JetBrains IDEs (GoLand) config folder
.idea
.idea

View File

@@ -15,13 +15,12 @@ notifications:
email: false
install:
# NOTE: The (brew update) should not be necessary, and slows things down;
# we include it as a workaround for https://github.com/Homebrew/brew/issues/3299
# ideally Travis should bake the (brew update) into its images
# (https://github.com/travis-ci/travis-ci/issues/8552 ), but thats only going
# to happen around November 2017 per https://blog.travis-ci.com/2017-10-16-a-new-default-os-x-image-is-coming .
# Remove the (brew update) at that time.
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update && brew install gpgme ; fi
# Ideally, the (brew update) should not be necessary and Travis would have fairly
# frequenstly updated OS images; thats not been the case historically.
# In particular, explicitly unlink python@2, which has been removed from Homebrew
# since the last OS image build (as of July 2020), but the Travis OS still
# contains it, and it prevents updating of Python 3.
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update && brew unlink python@2 && brew install gpgme ; fi
script:
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then hack/travis_osx.sh ; fi

View File

@@ -134,7 +134,7 @@ When new PRs for [containers/image](https://github.com/containers/image) break `
- create out a new branch in your `skopeo` checkout and switch to it
- update `vendor.conf`. Find out the `containers/image` dependency; update it to vendor from your own branch and your own repository fork (e.g. `github.com/containers/image my-branch https://github.com/runcom/image`)
- run `make vendor`
- make any other necessary changes in the skopeo repo (e.g. add other dependencies now requied by `containers/image`, or update skopeo for changed `containers/image` API)
- make any other necessary changes in the skopeo repo (e.g. add other dependencies now required by `containers/image`, or update skopeo for changed `containers/image` API)
- optionally add new integration tests to the skopeo repo
- submit the resulting branch as a skopeo PR, marked “DO NOT MERGE”
- iterate until tests pass and the PR is reviewed

View File

@@ -1,13 +1,9 @@
FROM ubuntu:19.10
FROM golang:1.14-buster
RUN apt-get update && apt-get install -y \
golang \
libbtrfs-dev \
git-core \
libdevmapper-dev \
libgpgme11-dev \
go-md2man \
libglib2.0-dev
RUN apt-get update && \
apt-get install -y \
libdevmapper-dev \
libgpgme11-dev
ENV GOPATH=/
WORKDIR /src/github.com/containers/skopeo

View File

@@ -25,6 +25,9 @@ BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
GO ?= go
GOBIN := $(shell $(GO) env GOBIN)
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
ifeq ($(GOBIN),)
GOBIN := $(GOPATH)/bin
endif
@@ -43,8 +46,10 @@ ifeq ($(DEBUG), 1)
override GOGCFLAGS += -N -l
endif
ifeq ($(shell $(GO) env GOOS), linux)
GO_DYN_FLAGS="-buildmode=pie"
ifeq ($(GOOS), linux)
ifneq ($(GOARCH),$(filter $(GOARCH),mips mipsle mips64 mips64le ppc64 riscv64))
GO_DYN_FLAGS="-buildmode=pie"
endif
endif
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
@@ -62,6 +67,9 @@ CONTAINER_RUN := $(CONTAINER_CMD) "$(IMAGE)"
GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)
EXTRA_LDFLAGS ?=
LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} $(EXTRA_LDFLAGS)'
MANPAGES_MD = $(wildcard docs/*.md)
MANPAGES ?= $(MANPAGES_MD:%.md=%)
@@ -85,7 +93,8 @@ help:
@echo
@echo " * 'install' - Install binaries and documents to system locations"
@echo " * 'binary' - Build skopeo with a container"
@echo " * 'binary-local' - Build skopeo locally"
@echo " * 'static' - Build statically linked binary"
@echo " * 'bin/skopeo' - Build skopeo locally"
@echo " * 'test-unit' - Execute unit tests"
@echo " * 'test-integration' - Execute integration tests"
@echo " * 'validate' - Verify whether there is no conflict and all Go source files have been formatted, linted and vetted"
@@ -98,19 +107,25 @@ help:
binary: cmd/skopeo
${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
skopeobuildimage make binary-local $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
skopeobuildimage make bin/skopeo $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
binary-static: cmd/skopeo
${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
skopeobuildimage make binary-local-static $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
# Update nix/nixpkgs.json its latest stable commit
.PHONY: nixpkgs
nixpkgs:
@nix run -f channel:nixos-20.03 nix-prefetch-git -c nix-prefetch-git \
--no-deepClone https://github.com/nixos/nixpkgs > nix/nixpkgs.json
# Build statically linked binary
.PHONY: static
static:
@nix build -f nix/
mkdir -p ./bin
cp -rfp ./result/bin/* ./bin/
# Build w/o using containers
binary-local:
$(GPGME_ENV) $(GO) build $(MOD_VENDOR) ${GO_DYN_FLAGS} -ldflags "-X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo
binary-local-static:
$(GPGME_ENV) $(GO) build $(MOD_VENDOR) -ldflags "-extldflags \"-static\" -X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo
.PHONY: bin/skopeo
bin/skopeo:
$(GPGME_ENV) $(GO) build $(MOD_VENDOR) ${GO_DYN_FLAGS} ${LDFLAGS} -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o $@ ./cmd/skopeo
build-container:
${CONTAINER_RUNTIME} build ${BUILD_ARGS} -t "$(IMAGE)" .
@@ -126,7 +141,7 @@ docs-in-container:
skopeobuildimage make docs $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
clean:
rm -f skopeo docs/*.1
rm -rf bin docs/*.1
install: install-binary install-docs install-completions
install -d -m 755 ${SIGSTOREDIR}
@@ -135,9 +150,9 @@ install: install-binary install-docs install-completions
install -d -m 755 ${REGISTRIESDDIR}
install -m 644 default.yaml ${REGISTRIESDDIR}/default.yaml
install-binary: ./skopeo
install-binary: bin/skopeo
install -d -m 755 ${INSTALLDIR}
install -m 755 skopeo ${INSTALLDIR}/skopeo
install -m 755 bin/skopeo ${INSTALLDIR}/skopeo
install-docs: docs
install -d -m 755 ${MANINSTALLDIR}/man1

View File

@@ -6,6 +6,7 @@ import (
"io"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/copy"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
@@ -22,6 +23,7 @@ type copyOptions struct {
global *globalOptions
srcImage *imageOptions
destImage *imageDestOptions
retryOpts *retry.RetryOptions
additionalTags []string // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
@@ -37,9 +39,11 @@ func copyCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
srcFlags, srcOpts := imageFlags(global, sharedOpts, "src-", "screds")
destFlags, destOpts := imageDestFlags(global, sharedOpts, "dest-", "dcreds")
retryFlags, retryOpts := retryFlags()
opts := copyOptions{global: global,
srcImage: srcOpts,
destImage: destOpts,
retryOpts: retryOpts,
}
cmd := &cobra.Command{
Use: "copy [command options] SOURCE-IMAGE DESTINATION-IMAGE",
@@ -52,13 +56,14 @@ Supported transports:
See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
RunE: commandAction(opts.run),
Example: `skopeo copy --sign-by dev@example.com container-storage:example/busybox:streaming docker://example/busybox:gold`,
Example: `skopeo copy docker://quay.io/skopeo/stable:latest docker://registry.example.com/skopeo:latest`,
}
adjustUsage(cmd)
flags := cmd.Flags()
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&srcFlags)
flags.AddFlagSet(&destFlags)
flags.AddFlagSet(&retryFlags)
flags.StringSliceVar(&opts.additionalTags, "additional-tag", []string{}, "additional tags (supports docker-archive)")
flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress output information when copying images")
flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
@@ -178,17 +183,19 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
decConfig = cc.DecryptConfig
}
_, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
RemoveSignatures: opts.removeSignatures,
SignBy: opts.signByFingerprint,
ReportWriter: stdout,
SourceCtx: sourceCtx,
DestinationCtx: destinationCtx,
ForceManifestMIMEType: manifestType,
ImageListSelection: imageListSelection,
OciDecryptConfig: decConfig,
OciEncryptLayers: encLayers,
OciEncryptConfig: encConfig,
})
return err
return retry.RetryIfNecessary(ctx, func() error {
_, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
RemoveSignatures: opts.removeSignatures,
SignBy: opts.signByFingerprint,
ReportWriter: stdout,
SourceCtx: sourceCtx,
DestinationCtx: destinationCtx,
ForceManifestMIMEType: manifestType,
ImageListSelection: imageListSelection,
OciDecryptConfig: decConfig,
OciEncryptLayers: encLayers,
OciEncryptConfig: encConfig,
})
return err
}, opts.retryOpts)
}

View File

@@ -6,22 +6,26 @@ import (
"io"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/spf13/cobra"
)
type deleteOptions struct {
global *globalOptions
image *imageOptions
global *globalOptions
image *imageOptions
retryOpts *retry.RetryOptions
}
func deleteCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
retryFlags, retryOpts := retryFlags()
opts := deleteOptions{
global: global,
image: imageOpts,
global: global,
image: imageOpts,
retryOpts: retryOpts,
}
cmd := &cobra.Command{
Use: "delete [command options] IMAGE-NAME",
@@ -38,6 +42,7 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
flags := cmd.Flags()
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&imageFlags)
flags.AddFlagSet(&retryFlags)
return cmd
}
@@ -63,5 +68,8 @@ func (opts *deleteOptions) run(args []string, stdout io.Writer) error {
ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()
return ref.DeleteImage(ctx, sys)
return retry.RetryIfNecessary(ctx, func() error {
return ref.DeleteImage(ctx, sys)
}, opts.retryOpts)
}

View File

@@ -5,46 +5,36 @@ import (
"fmt"
"io"
"strings"
"time"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/transports"
digest "github.com/opencontainers/go-digest"
"github.com/containers/image/v5/types"
"github.com/containers/skopeo/cmd/skopeo/inspect"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// inspectOutput is the output format of (skopeo inspect), primarily so that we can format it with a simple json.MarshalIndent.
type inspectOutput struct {
Name string `json:",omitempty"`
Tag string `json:",omitempty"`
Digest digest.Digest
RepoTags []string
Created *time.Time
DockerVersion string
Labels map[string]string
Architecture string
Os string
Layers []string
Env []string
}
type inspectOptions struct {
global *globalOptions
image *imageOptions
raw bool // Output the raw manifest instead of parsing information about the image
config bool // Output the raw config blob instead of parsing information about the image
global *globalOptions
image *imageOptions
retryOpts *retry.RetryOptions
raw bool // Output the raw manifest instead of parsing information about the image
config bool // Output the raw config blob instead of parsing information about the image
}
func inspectCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
retryFlags, retryOpts := retryFlags()
opts := inspectOptions{
global: global,
image: imageOpts,
global: global,
image: imageOpts,
retryOpts: retryOpts,
}
cmd := &cobra.Command{
Use: "inspect [command options] IMAGE-NAME",
@@ -64,10 +54,16 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
flags.BoolVar(&opts.config, "config", false, "output configuration")
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&imageFlags)
flags.AddFlagSet(&retryFlags)
return cmd
}
func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error) {
var (
rawManifest []byte
src types.ImageSource
imgInspect *types.ImageInspectInfo
)
ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()
@@ -85,9 +81,11 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
return err
}
src, err := parseImageSource(ctx, opts.image, imageName)
if err != nil {
return fmt.Errorf("Error parsing image name %q: %v", imageName, err)
if err := retry.RetryIfNecessary(ctx, func() error {
src, err = parseImageSource(ctx, opts.image, imageName)
return err
}, opts.retryOpts); err != nil {
return errors.Wrapf(err, "Error parsing image name %q", imageName)
}
defer func() {
@@ -96,9 +94,11 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
}
}()
rawManifest, _, err := src.GetManifest(ctx, nil)
if err != nil {
return fmt.Errorf("Error retrieving manifest for image: %v", err)
if err := retry.RetryIfNecessary(ctx, func() error {
rawManifest, _, err = src.GetManifest(ctx, nil)
return err
}, opts.retryOpts); err != nil {
return errors.Wrapf(err, "Error retrieving manifest for image")
}
if opts.raw && !opts.config {
@@ -115,9 +115,12 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
}
if opts.config && opts.raw {
configBlob, err := img.ConfigBlob(ctx)
if err != nil {
return fmt.Errorf("Error reading configuration blob: %v", err)
var configBlob []byte
if err := retry.RetryIfNecessary(ctx, func() error {
configBlob, err = img.ConfigBlob(ctx)
return err
}, opts.retryOpts); err != nil {
return errors.Wrapf(err, "Error reading configuration blob")
}
_, err = stdout.Write(configBlob)
if err != nil {
@@ -125,9 +128,12 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
}
return nil
} else if opts.config {
config, err := img.OCIConfig(ctx)
if err != nil {
return fmt.Errorf("Error reading OCI-formatted configuration data: %v", err)
var config *v1.Image
if err := retry.RetryIfNecessary(ctx, func() error {
config, err = img.OCIConfig(ctx)
return err
}, opts.retryOpts); err != nil {
return errors.Wrapf(err, "Error reading OCI-formatted configuration data")
}
err = json.NewEncoder(stdout).Encode(config)
if err != nil {
@@ -136,15 +142,18 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
return nil
}
imgInspect, err := img.Inspect(ctx)
if err != nil {
if err := retry.RetryIfNecessary(ctx, func() error {
imgInspect, err = img.Inspect(ctx)
return err
}, opts.retryOpts); err != nil {
return err
}
outputData := inspectOutput{
outputData := inspect.Output{
Name: "", // Set below if DockerReference() is known
Tag: imgInspect.Tag,
// Digest is set below.
RepoTags: []string{}, // Possibly overriden for docker.Transport.
RepoTags: []string{}, // Possibly overridden for docker.Transport.
Created: imgInspect.Created,
DockerVersion: imgInspect.DockerVersion,
Labels: imgInspect.Labels,

View File

@@ -0,0 +1,23 @@
package inspect
import (
"time"
digest "github.com/opencontainers/go-digest"
)
// Output is the output format of (skopeo inspect),
// primarily so that we can format it with a simple json.MarshalIndent.
type Output struct {
Name string `json:",omitempty"`
Tag string `json:",omitempty"`
Digest digest.Digest
RepoTags []string
Created *time.Time
DockerVersion string
Labels map[string]string
Architecture string
Os string
Layers []string
Env []string
}
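
To illustrate what making the type external buys (a sketch with made-up field values): other tooling can now import cmd/skopeo/inspect and emit or parse the same JSON shape that skopeo inspect prints, instead of re-declaring the struct.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/containers/skopeo/cmd/skopeo/inspect"
)

func main() {
	created := time.Date(2020, 9, 21, 0, 0, 0, 0, time.UTC) // hypothetical values throughout
	out := inspect.Output{
		Name:          "registry.example.com/skopeo",
		Tag:           "latest",
		RepoTags:      []string{"latest", "v1.2.0"},
		Created:       &created,
		DockerVersion: "19.03.12",
		Labels:        map[string]string{"maintainer": "example"},
		Architecture:  "amd64",
		Os:            "linux",
		Layers:        []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
		Env:           []string{"PATH=/usr/bin"},
	}

	// Format it with json.MarshalIndent, as the struct's doc comment suggests.
	data, err := json.MarshalIndent(out, "", "    ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}
```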

View File

@@ -7,6 +7,7 @@ import (
"os"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/directory"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/pkg/blobinfocache"
@@ -17,16 +18,19 @@ import (
)
type layersOptions struct {
global *globalOptions
image *imageOptions
global *globalOptions
image *imageOptions
retryOpts *retry.RetryOptions
}
func layersCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
retryFlags, retryOpts := retryFlags()
opts := layersOptions{
global: global,
image: imageOpts,
global: global,
image: imageOpts,
retryOpts: retryOpts,
}
cmd := &cobra.Command{
Hidden: true,
@@ -38,6 +42,7 @@ func layersCmd(global *globalOptions) *cobra.Command {
flags := cmd.Flags()
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&imageFlags)
flags.AddFlagSet(&retryFlags)
return cmd
}
@@ -60,12 +65,20 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
return err
}
cache := blobinfocache.DefaultCache(sys)
rawSource, err := parseImageSource(ctx, opts.image, imageName)
if err != nil {
var (
rawSource types.ImageSource
src types.ImageCloser
)
if err = retry.RetryIfNecessary(ctx, func() error {
rawSource, err = parseImageSource(ctx, opts.image, imageName)
return err
}, opts.retryOpts); err != nil {
return err
}
src, err := image.FromSource(ctx, sys, rawSource)
if err != nil {
if err = retry.RetryIfNecessary(ctx, func() error {
src, err = image.FromSource(ctx, sys, rawSource)
return err
}, opts.retryOpts); err != nil {
if closeErr := rawSource.Close(); closeErr != nil {
return errors.Wrapf(err, " (close error: %v)", closeErr)
}
@@ -129,8 +142,14 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
}()
for _, bd := range blobDigests {
r, blobSize, err := rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1}, cache)
if err != nil {
var (
r io.ReadCloser
blobSize int64
)
if err = retry.RetryIfNecessary(ctx, func() error {
r, blobSize, err = rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1}, cache)
return err
}, opts.retryOpts); err != nil {
return err
}
if _, err := dest.PutBlob(ctx, r, types.BlobInfo{Digest: bd.digest, Size: blobSize}, cache, bd.isConfig); err != nil {
@@ -141,8 +160,11 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
}
}
manifest, _, err := src.Manifest(ctx)
if err != nil {
var manifest []byte
if err = retry.RetryIfNecessary(ctx, func() error {
manifest, _, err = src.Manifest(ctx)
return err
}, opts.retryOpts); err != nil {
return err
}
if err := dest.PutManifest(ctx, manifest, nil); err != nil {

View File

@@ -7,6 +7,7 @@ import (
"io"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports/alltransports"
@@ -22,17 +23,20 @@ type tagListOutput struct {
}
type tagsOptions struct {
global *globalOptions
image *imageOptions
global *globalOptions
image *imageOptions
retryOpts *retry.RetryOptions
}
func tagsCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := dockerImageFlags(global, sharedOpts, "", "")
retryFlags, retryOpts := retryFlags()
opts := tagsOptions{
global: global,
image: imageOpts,
global: global,
image: imageOpts,
retryOpts: retryOpts,
}
cmd := &cobra.Command{
Use: "list-tags [command options] REPOSITORY-NAME",
@@ -51,6 +55,7 @@ See skopeo-list-tags(1) section "REPOSITORY NAMES" for the expected format
flags := cmd.Flags()
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&imageFlags)
flags.AddFlagSet(&retryFlags)
return cmd
}
@@ -118,8 +123,12 @@ func (opts *tagsOptions) run(args []string, stdout io.Writer) (retErr error) {
return err
}
repositoryName, tagListing, err := listDockerTags(ctx, sys, imgRef)
if err != nil {
var repositoryName string
var tagListing []string
if err = retry.RetryIfNecessary(ctx, func() error {
repositoryName, tagListing, err = listDockerTags(ctx, sys, imgRef)
return err
}, opts.retryOpts); err != nil {
return err
}

View File

@@ -11,6 +11,7 @@ import (
"regexp"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/copy"
"github.com/containers/image/v5/directory"
"github.com/containers/image/v5/docker"
@@ -25,14 +26,15 @@ import (
// syncOptions contains information retrieved from the skopeo sync command line.
type syncOptions struct {
global *globalOptions // Global (not command dependant) skopeo options
global *globalOptions // Global (not command dependent) skopeo options
srcImage *imageOptions // Source image options
destImage *imageDestOptions // Destination image options
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
source string // Source repository name
destination string // Destination registry name
scoped bool // When true, namespace copied images at destination using the source repository name
retryOpts *retry.RetryOptions
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
source string // Source repository name
destination string // Destination registry name
scoped bool // When true, namespace copied images at destination using the source repository name
}
// repoDescriptor contains information of a single repository used as a sync source.
@@ -65,11 +67,13 @@ func syncCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
srcFlags, srcOpts := dockerImageFlags(global, sharedOpts, "src-", "screds")
destFlags, destOpts := dockerImageFlags(global, sharedOpts, "dest-", "dcreds")
retryFlags, retryOpts := retryFlags()
opts := syncOptions{
global: global,
srcImage: srcOpts,
destImage: &imageDestOptions{imageOptions: destOpts},
retryOpts: retryOpts,
}
cmd := &cobra.Command{
@@ -95,6 +99,7 @@ See skopeo-sync(1) for details.
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&srcFlags)
flags.AddFlagSet(&destFlags)
flags.AddFlagSet(&retryFlags)
return cmd
}
@@ -509,9 +514,15 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
return err
}
ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()
sourceArg := args[0]
srcRepoList, err := imagesToCopy(sourceArg, opts.source, sourceCtx)
if err != nil {
var srcRepoList []repoDescriptor
if err = retry.RetryIfNecessary(ctx, func() error {
srcRepoList, err = imagesToCopy(sourceArg, opts.source, sourceCtx)
return err
}, opts.retryOpts); err != nil {
return err
}
@@ -521,9 +532,6 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
return err
}
ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()
imagesNumber := 0
options := copy.Options{
RemoveSignatures: opts.removeSignatures,
@@ -563,8 +571,10 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
"to": transports.ImageName(destRef),
}).Infof("Copying image tag %d/%d", counter+1, len(srcRepo.TaggedImages))
_, err = copy.Image(ctx, policyContext, destRef, ref, &options)
if err != nil {
if err = retry.RetryIfNecessary(ctx, func() error {
_, err = copy.Image(ctx, policyContext, destRef, ref, &options)
return err
}, opts.retryOpts); err != nil {
return errors.Wrapf(err, "Error copying tag %q", transports.ImageName(ref))
}
imagesNumber++

View File

@@ -6,6 +6,7 @@ import (
"os"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
@@ -56,6 +57,7 @@ type dockerImageOptions struct {
shared *sharedImageOptions // May be shared across several imageOptions instances.
authFilePath optionalString // Path to a */containers/auth.json (prefixed version to override shared image option).
credsOption optionalString // username[:password] for accessing a registry
registryToken optionalString // token to be used directy as a Bearer token when accessing the registry
dockerCertPath string // A directory using Docker-like *.{crt,cert,key} files for connecting to a registry or a daemon
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
noCreds bool // Access the registry anonymously
@@ -91,6 +93,7 @@ func dockerImageFlags(global *globalOptions, shared *sharedImageOptions, flagPre
f := fs.VarPF(newOptionalStringValue(&flags.credsOption), credsOptionAlias, "", "Use `USERNAME[:PASSWORD]` for accessing the registry")
f.Hidden = true
}
fs.Var(newOptionalStringValue(&flags.registryToken), flagPrefix+"registry-token", "Provide a Bearer token for accessing the registry")
fs.StringVar(&flags.dockerCertPath, flagPrefix+"cert-dir", "", "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry or daemon")
optionalBoolFlag(&fs, &flags.tlsVerify, flagPrefix+"tls-verify", "require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)")
fs.BoolVar(&flags.noCreds, flagPrefix+"no-creds", false, "Access the registry anonymously")
@@ -108,6 +111,17 @@ func imageFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, c
return fs, opts
}
type retryOptions struct {
maxRetry int // The number of times to possibly retry
}
func retryFlags() (pflag.FlagSet, *retry.RetryOptions) {
opts := retry.RetryOptions{}
fs := pflag.FlagSet{}
fs.IntVar(&opts.MaxRetry, "retry-times", 0, "the number of times to possibly retry")
return fs, &opts
}
// newSystemContext returns a *types.SystemContext corresponding to opts.
// It is guaranteed to return a fresh instance, so it is safe to make additional updates to it.
func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
@@ -138,6 +152,9 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
return nil, err
}
}
if opts.registryToken.present {
ctx.DockerBearerRegistryToken = opts.registryToken.value
}
if opts.noCreds {
ctx.DockerAuthConfig = &types.DockerAuthConfig{}
}
@@ -145,7 +162,7 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
return ctx, nil
}
// imageDestOptions is a superset of imageOptions specialized for iamge destinations.
// imageDestOptions is a superset of imageOptions specialized for image destinations.
type imageDestOptions struct {
*imageOptions
dirForceCompression bool // Compress layers when saving to the dir: transport

View File

@@ -54,6 +54,7 @@ func TestImageOptionsNewSystemContext(t *testing.T) {
"--dest-daemon-host", "daemon-host.example.com",
"--dest-tls-verify=false",
"--dest-creds", "creds-user:creds-password",
"--dest-registry-token", "faketoken",
})
res, err = opts.newSystemContext()
require.NoError(t, err)
@@ -67,6 +68,7 @@ func TestImageOptionsNewSystemContext(t *testing.T) {
DockerCertPath: "/srv/cert-dir",
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
DockerAuthConfig: &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
DockerBearerRegistryToken: "faketoken",
DockerDaemonCertPath: "/srv/cert-dir",
DockerDaemonHost: "daemon-host.example.com",
DockerDaemonInsecureSkipTLSVerify: true,
@@ -164,6 +166,7 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
"--dest-daemon-host", "daemon-host.example.com",
"--dest-tls-verify=false",
"--dest-creds", "creds-user:creds-password",
"--dest-registry-token", "faketoken",
})
res, err = opts.newSystemContext()
require.NoError(t, err)
@@ -177,6 +180,7 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
DockerCertPath: "/srv/cert-dir",
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
DockerAuthConfig: &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
DockerBearerRegistryToken: "faketoken",
DockerDaemonCertPath: "/srv/cert-dir",
DockerDaemonHost: "daemon-host.example.com",
DockerDaemonInsecureSkipTLSVerify: true,

View File

@@ -49,6 +49,8 @@ _skopeo_copy() {
--dest-tls-verify
--src-daemon-host
--dest-daemon-host
--src-registry-token
--dest-registry-token
"
local boolean_options="
@@ -73,6 +75,8 @@ _skopeo_inspect() {
--authfile
--creds
--cert-dir
--retry-times
--registry-token
"
local boolean_options="
--config
@@ -119,6 +123,7 @@ _skopeo_delete() {
--authfile
--creds
--cert-dir
--registry-token
"
local boolean_options="
--tls-verify
@@ -135,11 +140,14 @@ _skopeo_delete() {
_skopeo_layers() {
local options_with_args="
--authfile
--creds
--cert-dir
--registry-token
"
local boolean_options="
--tls-verify
--no-creds
"
_complete_ "$options_with_args" "$boolean_options"
}
@@ -149,6 +157,7 @@ _skopeo_list_repository_tags() {
--authfile
--creds
--cert-dir
--registry-token
"
local boolean_options="

View File

@@ -18,7 +18,7 @@ using the latest Fedora and then Skopeo is installed into them:
## Sample Usage
Although not required, it is suggested that [Podman](https://github.com/containers/libpod) be used with these container images.
Although not required, it is suggested that [Podman](https://github.com/containers/podman) be used with these container images.
```
# Get Help on Skopeo

View File

@@ -13,7 +13,7 @@ FROM registry.fedoraproject.org/fedora:32
# up space. Also reinstall shadow-utils as without
# doing so, the setuid/setgid bits on newuidmap
# and newgidmap are lost in the Fedora images.
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; yum -y install skopeo fuse-overlayfs --exclude container-selinux; yum clean all; rm -rf /var/cache /var/log/dnf* /var/log/yum.*;
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; yum -y install skopeo fuse-overlayfs --exclude container-selinux; yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*
# Adjust storage.conf to enable Fuse storage.
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf

View File

@@ -14,7 +14,7 @@ FROM registry.fedoraproject.org/fedora:32
# up space. Also reinstall shadow-utils as without
# doing so, the setuid/setgid bits on newuidmap
# and newgidmap are lost in the Fedora images.
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; yum -y install skopeo fuse-overlayfs --enablerepo updates-testing --exclude container-selinux; yum clean all; rm -rf /var/cache /var/log/dnf* /var/log/yum.*;
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; yum -y install skopeo fuse-overlayfs --enablerepo updates-testing --exclude container-selinux; yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*
# Adjust storage.conf to enable Fuse storage.
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf

View File

@@ -29,11 +29,12 @@ mkdir /root/skopeo; \
git clone https://github.com/containers/skopeo /root/skopeo/src/github.com/containers/skopeo; \
export GOPATH=/root/skopeo; \
cd /root/skopeo/src/github.com/containers/skopeo; \
make binary-local;\
make bin/skopeo;\
make install;\
rm -rf /root/skopeo/*; \
yum -y remove git golang go-md2man make; \
yum clean all; rm -rf /var/cache /var/log/dnf* /var/log/yum.*;
yum -y clean all; yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*
# Adjust storage.conf to enable Fuse storage.
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf

View File

@@ -15,6 +15,9 @@ Uses the system's trust policy to validate images, rejects images not trusted by
_destination-image_ use the "image name" format described above
_source-image_ and _destination-image_ are interpreted completely independently; e.g. the destination name does not
automatically inherit any parts of the source name.
## OPTIONS
**--all**
@@ -47,29 +50,29 @@ Path of the authentication file for the destination registry. Uses path given by
**--sign-by=**_key-id_ add a signature using that key ID for an image name corresponding to _destination-image_
**--encryption-key** _Key_ a reference prefixed with the encryption protocol to use. The supported protocols are JWE, PGP and PKCS7. For instance, jwe:/path/to/key.pem or pgp:admin@example.com or pkcs7:/path/to/x509-file. This feature is still *experimental*.
**--encryption-key** _protocol:keyfile_ specifies the encryption protocol, which can be JWE (RFC7516), PGP (RFC4880), and PKCS7 (RFC2315) and the key material required for image encryption. For instance, jwe:/path/to/key.pem or pgp:admin@example.com or pkcs7:/path/to/x509-file.
**--decryption-key** _Key_ a reference required to perform decryption of container images. This should point to files which represent keys and/or certificates that can be used for decryption. Decryption will be tried with all keys. This feature is still *experimental*.
**--decryption-key** _key[:passphrase]_ to be used for decryption of images. Key can point to keys and/or certificates. Decryption will be tried with all keys. If the key is protected by a passphrase, it is required to be passed in the argument and omitted otherwise.
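A minimal sketch of how the two options above fit together, assuming illustrative key paths and local OCI layouts (none of these names come from this changeset):
```sh
# Encrypt while copying; the value is protocol:keyfile as described above.
$ skopeo copy --encryption-key jwe:/path/to/public.pem \
    oci:busybox_local:latest oci:busybox_encrypted:latest
# Decrypt with the matching private key (append :passphrase if the key is protected).
$ skopeo copy --decryption-key /path/to/private.pem \
    oci:busybox_encrypted:latest oci:busybox_decrypted:latest
```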
**--src-creds** _username[:password]_ for accessing the source registry
**--src-creds** _username[:password]_ for accessing the source registry.
**--dest-compress** _bool-value_ Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)
**--dest-compress** _bool-value_ Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source).
**--dest-oci-accept-uncompressed-layers** _bool-value_ Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed)
**--dest-oci-accept-uncompressed-layers** _bool-value_ Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed).
**--dest-creds** _username[:password]_ for accessing the destination registry
**--dest-creds** _username[:password]_ for accessing the destination registry.
**--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon
**--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon.
**--src-no-creds** _bool-value_ Access the registry anonymously.
**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container source registry or daemon (defaults to true)
**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container source registry or daemon (defaults to true).
**--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon
**--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon.
**--dest-no-creds** _bool-value_ Access the registry anonymously.
**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container destination registry or daemon (defaults to true)
**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container destination registry or daemon (defaults to true).
**--src-daemon-host** _host_ Copy from docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).
@@ -81,8 +84,17 @@ Existing signatures, if any, are preserved as well.
**--dest-compress-level** _format_ Specifies the compression level to use. The value is specific to the compression algorithm used, e.g. for zstd the accepted values are in the range 1-20 (inclusive), while for gzip it is 1-9 (inclusive).
**--src-registry-token** _Bearer token_ for accessing the source registry.
**--dest-registry-token** _Bearer token_ for accessing the destination registry.
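A hedged example of the per-side token flags documented above; the registry names and token variables are placeholders, not values taken from this changeset:
```sh
# Pass independent Bearer tokens for the source and destination registries.
$ skopeo copy \
    --src-registry-token "$SRC_TOKEN" \
    --dest-registry-token "$DEST_TOKEN" \
    docker://src.example.com/app:latest \
    docker://dest.example.com/app:latest
```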
## EXAMPLES
To just copy an image from one registry to another:
```sh
$ skopeo copy docker://quay.io/skopeo/stable:latest docker://registry.example.com/skopeo:latest
```
To copy the layers of the docker.io busybox image to a local directory:
```sh
$ mkdir -p /var/lib/images/busybox


@@ -24,16 +24,18 @@ $ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distrib
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
**--creds** _username[:password]_ for accessing the registry
**--creds** _username[:password]_ for accessing the registry.
**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry
**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry.
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true).
**--no-creds** _bool-value_ Access the registry anonymously.
Additionally, the registry must allow deletions by setting `REGISTRY_STORAGE_DELETE_ENABLED=true` for the registry daemon.
**--registry-token** _Bearer token_ for accessing the registry.
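A sketch combining the deletion requirement and the new flag; the local registry invocation and the token variable are illustrative assumptions:
```sh
# The registry daemon must allow deletions, e.g. for the reference registry image:
$ docker run -d -p 5000:5000 -e REGISTRY_STORAGE_DELETE_ENABLED=true registry:2
# Authenticate the delete with a Bearer token instead of --creds.
$ skopeo delete --registry-token "$TOKEN" docker://registry.example.com/example/pause:latest
```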
## EXAMPLES
Mark image example/pause for deletion from the registry.example.com registry:


@@ -25,14 +25,18 @@ Return low-level information about _image-name_ in a registry
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
**--creds** _username[:password]_ for accessing the registry
**--creds** _username[:password]_ for accessing the registry.
**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry
**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry.
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
**--retry-times** the number of times to retry, retry wait time will be exponentially increased based on the number of failed attempts.
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true).
**--no-creds** _bool-value_ Access the registry anonymously.
**--registry-token** _Bearer token_ for accessing the registry.
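For instance, retries and Bearer-token authentication can be combined; the image name and token below are placeholders:
```sh
# Retry transient registry errors up to 3 times and authenticate with a token.
$ skopeo inspect --retry-times 3 --registry-token "$TOKEN" docker://registry.example.com/busybox:latest
```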
## EXAMPLES
To review information for the image fedora from the docker.io registry:


@@ -15,14 +15,16 @@ Return a list of tags from _repository-name_ in a registry.
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
**--creds** _username[:password]_ for accessing the registry
**--creds** _username[:password]_ for accessing the registry.
**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry
**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry.
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true).
**--no-creds** _bool-value_ Access the registry anonymously.
**--registry-token** _Bearer token_ for accessing the registry.
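A short hedged example of the new flag with this command; the repository and token are placeholders:
```sh
# List tags for a repository while authenticating with a Bearer token.
$ skopeo list-tags --registry-token "$TOKEN" docker://registry.example.com/example/busybox
```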
## REPOSITORY NAMES
Repository names are transport-specific references as each transport may have its own concept of a "repository" and "tags". Currently, only the Docker transport is supported.


@@ -4,7 +4,7 @@
skopeo\-login - Login to a container registry
## SYNOPSIS
**skoepo login** [*options*] *registry*
**skopeo login** [*options*] *registry*
## DESCRIPTION
**skopeo login** logs into a specified registry server with the correct username


@@ -71,6 +71,10 @@ Path of the authentication file for the destination registry. Uses path given by
**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to a container destination registry or daemon (defaults to true).
**--src-registry-token** _Bearer token_ for accessing the source registry.
**--dest-registry-token** _Bearer token_ for accessing the destination registry.
## EXAMPLES
### Synchronizing to a local directory
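A hedged sketch of the source-side token in a sync run; the repository, directory and token are illustrative, and the `--src`/`--dest` transport arguments are assumed from the command's usual syntax rather than taken from this diff:
```sh
# Mirror a repository into a local directory, authenticating to the source with a token.
$ skopeo sync --src docker --dest dir \
    --src-registry-token "$SRC_TOKEN" \
    registry.example.com/busybox /media/usb
```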


@@ -44,6 +44,11 @@ Most commands refer to container images, using a _transport_`:`_details_ format.
**oci:**_path_**:**_tag_
An image _tag_ in a directory compliant with "Open Container Image Layout Specification" at _path_.
**oci-archive:**_path_**:**_tag_
An image _tag_ in a tar archive compliant with "Open Container Image Layout Specification" at _path_.
See [containers-transports(5)](https://github.com/containers/image/blob/master/docs/containers-transports.5.md) for details.
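A minimal sketch of the two OCI transports described above; the paths are illustrative:
```sh
# Copy an image into an OCI archive, then unpack it into an OCI layout directory.
$ skopeo copy docker://busybox:latest oci-archive:/tmp/busybox.tar:latest
$ skopeo copy oci-archive:/tmp/busybox.tar:latest oci:/tmp/busybox-layout:latest
```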
## OPTIONS
**--command-timeout** _duration_ Timeout for the command execution.

go.mod

@@ -3,17 +3,17 @@ module github.com/containers/skopeo
go 1.12
require (
github.com/containers/common v0.14.0
github.com/containers/image/v5 v5.5.1
github.com/containers/ocicrypt v1.0.2
github.com/containers/storage v1.20.2
github.com/containers/common v0.22.0
github.com/containers/image/v5 v5.6.0
github.com/containers/ocicrypt v1.0.3
github.com/containers/storage v1.23.5
github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f
github.com/dsnet/compress v0.0.1 // indirect
github.com/go-check/check v0.0.0-20180628173108-788fd7840127
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/image-tools v0.0.0-20170926011501-6d941547fa1d
github.com/opencontainers/runtime-spec v1.0.0 // indirect
github.com/opencontainers/runc v1.0.0-rc92 // indirect
github.com/pkg/errors v0.9.1
github.com/russross/blackfriday v2.0.0+incompatible // indirect
github.com/sirupsen/logrus v1.6.0

go.sum

@@ -7,8 +7,6 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg=
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@@ -23,43 +21,49 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s=
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69 h1:rG1clvJbgsUcmb50J82YUJhUMopWNtZvyMZjb+4fqGw=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.2 h1:ForxmXkA6tPIvffbrDAcPUIB32QgXkt2XFj+F0UxetA=
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc h1:TP+534wVlf61smEIq1nwLLAjQVEK2EADoW3CX9AuT+8=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containers/common v0.14.0 h1:hiZFDPf6ajKiDmojN5f5X3gboKPO73NLrYb0RXfrQiA=
github.com/containers/common v0.14.0/go.mod h1:9olhlE+WhYof1npnMJdyRMX14/yIUint6zyHzcyRVAg=
github.com/containers/image/v5 v5.4.4 h1:JSanNn3v/BMd3o0MEvO4R4OKNuoJUSzVGQAI1+0FMXE=
github.com/containers/image/v5 v5.4.4/go.mod h1:g7cxNXitiLi6pEr9/L9n/0wfazRuhDKXU15kV86N8h8=
github.com/containers/image/v5 v5.5.1 h1:h1FCOXH6Ux9/p/E4rndsQOC4yAdRU0msRTfLVeQ7FDQ=
github.com/containers/image/v5 v5.5.1/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
github.com/containers/common v0.22.0 h1:MjJIMka4pJddHsfZpQCF7jOmX6vXqMs0ojDeYmPKoSk=
github.com/containers/common v0.22.0/go.mod h1:qsLcLHM7ha5Nc+JDp5duBwfwEfrnlfjXL/K8HO96QHw=
github.com/containers/image/v5 v5.5.2 h1:fv7FArz0zUnjH0W0l8t90CqWFlFcQrPP6Pug+9dUtVI=
github.com/containers/image/v5 v5.5.2/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
github.com/containers/image/v5 v5.6.0 h1:r4AqIX4NO/X7OJkqX574zITV3fq0ZPn0pSlLsxWF6ww=
github.com/containers/image/v5 v5.6.0/go.mod h1:iUSWo3SOLqJo0CkZkKrHxqR6YWqrT98mkXFpE0MceE8=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.2 h1:Q0/IPs8ohfbXNxEfyJ2pFVmvJu5BhqJUAmc6ES9NKbo=
github.com/containers/ocicrypt v1.0.2/go.mod h1:nsOhbP19flrX6rE7ieGFvBlr7modwmNjsqWarIUce4M=
github.com/containers/storage v1.19.1 h1:YKIzOO12iaD5Ra0PKFS6emcygbHLmwmQOCQRU/19YAQ=
github.com/containers/storage v1.19.1/go.mod h1:KbXjSwKnx17ejOsjFcCXSf78mCgZkQSLPBNTMRc3XrQ=
github.com/containers/storage v1.20.2 h1:tw/uKRPDnmVrluIzer3dawTFG/bTJLP8IEUyHFhltYk=
github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6GzVe1c=
github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
github.com/containers/storage v1.20.2/go.mod h1:oOB9Ie8OVPojvoaKWEGSEtHbXUAs+tSyr7RO7ZGteMc=
github.com/containers/storage v1.23.5 h1:He9I6y1vRVXYoQg4v2Q9HFAcX4dI3V5MCCrjeBcjkCY=
github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -84,7 +88,6 @@ github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5Jflh
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -95,6 +98,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
@@ -105,8 +109,8 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -117,7 +121,6 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -128,18 +131,19 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
@@ -150,17 +154,15 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.5 h1:7q6vHIqubShURwQz8cQK6yIe/xC3IF0Vm7TGfqjewrc=
github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.7 h1:7rix8v8GpI3ZBb0nSozFRgbtXKv+hOe+qfEpZqybrAg=
github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.8 h1:eLeJ3dr/Y9+XRfJT4l+8ZjmtB5RPJhucH2HeCV5+IZY=
github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg=
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.3 h1:Ce2to9wvs/cuJ2b86/CKQoTYr9VHfpanYosZ0UBJqdw=
github.com/klauspost/pgzip v1.2.3/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -183,12 +185,16 @@ github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJd
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/sys/mountinfo v0.1.3 h1:KIrhRO14+AkwKvG/g2yIpNMOUVZ02xNhOw8KY1WsLOI=
github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0=
github.com/mrunalp/fileutils v0.0.0-20200520151820-abd8a0e76976/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0=
github.com/mtrmac/gpgme v0.1.2 h1:dNOmvYmsrakgW7LcgiprD0yfRuQQe8/C8F6Z+zogO3s=
github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@@ -196,11 +202,11 @@ github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
@@ -210,19 +216,19 @@ github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod
github.com/opencontainers/image-tools v0.0.0-20170926011501-6d941547fa1d h1:X9WSFjjZNqYRqO2MenUgqE2nj/oydcfIzXJ0R/SVnnA=
github.com/opencontainers/image-tools v0.0.0-20170926011501-6d941547fa1d/go.mod h1:A9btVpZLzttF4iFaKNychhPyrhfOjJ1OF5KrA8GcLj4=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc90 h1:4+xo8mtWixbHoEm451+WJNUrq12o2/tDsyK9Vgc/NcA=
github.com/opencontainers/runc v1.0.0-rc90/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc91/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8=
github.com/opencontainers/runc v1.0.0-rc92 h1:+IczUKCRzDzFDnw99O/PAqrcBBCoRp9xN3cB1SYSNS4=
github.com/opencontainers/runc v1.0.0-rc92/go.mod h1:X1zlU4p7wOlX4+WRCz+hvlRv8phdL7UqbYD+vQwNMmE=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.0 h1:O6L965K88AilqnxeYPks/75HLpp4IG+FjeSCI3cVdRg=
github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/selinux v1.5.1 h1:jskKwSMFYqyTrHEuJgQoUlTcId0av64S6EWObrIfn5Y=
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6 h1:NhsM2gc769rVWDqJvapK37r+7+CBXI8xHhnfnt8uQsg=
github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.5.2 h1:F6DgIsjgBIcDksLW4D5RG9bXok6oqZ3nvMwj4ZoFu/Q=
github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K6Jx62mY=
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
@@ -252,15 +258,15 @@ github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8=
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/russross/blackfriday v2.0.0+incompatible h1:cBXrhZNUf9C+La9/YpS+UHpUT8YD6Td9ZMSU9APFcsk=
github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -280,18 +286,13 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.0 h1:jlIyCplCJFULU/01vCkhKuTyc3OorI3bJFuw6obfgho=
github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
@@ -301,19 +302,25 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/ulikunitz/xz v0.5.7 h1:YvTNdFzX6+W5m9msiYg/zpkSURPPtOlzbqYjrFn7Yt4=
github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ=
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
github.com/vbauerster/mpb/v5 v5.0.4 h1:w7l/tJfHmtIOKZkU+bhbDZOUxj1kln9jy4DUOp3Tl14=
github.com/vbauerster/mpb/v5 v5.0.4/go.mod h1:fvzasBUyuo35UyuA6sSOlVhpLoNQsp2nBdHw7OiSUU8=
github.com/vbauerster/mpb/v5 v5.2.2 h1:zIICVOm+XD+uV6crpSORaL6I0Q1WqOdvxZTp+r3L9cw=
github.com/vbauerster/mpb/v5 v5.2.2/go.mod h1:W5Fvgw4dm3/0NhqzV8j6EacfuTe5SvnzBRwiXxDR9ww=
github.com/vbauerster/mpb/v5 v5.3.0 h1:vgrEJjUzHaSZKDRRxul5Oh4C72Yy/5VEMb0em+9M0mQ=
github.com/vbauerster/mpb/v5 v5.3.0/go.mod h1:4yTkvAb8Cm4eylAp6t0JRq6pXDkFJ4krUlDqWYkakAs=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243 h1:R43TdZy32XXSXjJn7M/HhALJ9imq6ztLnChfYJpVDnM=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b h1:6cLsL+2FW6dRAdl5iMtHgRogVCff0QpRi9653YmdcJA=
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
@@ -321,6 +328,10 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@@ -348,9 +359,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
@@ -372,23 +381,24 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f h1:gWF768j/LaZugp8dyS4UwsslYCYz9XgFxvlgsn0n9H8=
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1 h1:sIky/MyNRSHTrdxfsiUSS4WIAMvInbeXljJz+jDjeYE=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed h1:WBkVNH1zd9jg/dK4HCM4lNANnmd12EHC9z+LmcCG4ns=
golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -407,7 +417,6 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -438,7 +447,6 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -448,4 +456,3 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=


@@ -8,7 +8,7 @@ bundle_test_integration() {
# subshell so that we can export PATH without breaking other things
(
make binary-local ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
make bin/skopeo ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
make install
bundle_test_integration
) 2>&1


@@ -11,7 +11,7 @@ sed -i \
/etc/containers/storage.conf
# Build skopeo, install into /usr/bin
make binary-local ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
make bin/skopeo ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
make install
# Run tests


@@ -12,6 +12,6 @@ go version
GO111MODULE=off go get -u github.com/cpuguy83/go-md2man golang.org/x/lint/golint
cd ${_containers}/skopeo
make validate-local test-unit-local binary-local
make validate-local test-unit-local bin/skopeo
sudo make install
skopeo -v


@@ -114,7 +114,7 @@ Make sure to clone this repository in your `GOPATH` - otherwise compilation fail
```bash
$ git clone https://github.com/containers/skopeo $GOPATH/src/github.com/containers/skopeo
$ cd $GOPATH/src/github.com/containers/skopeo && make binary-local
$ cd $GOPATH/src/github.com/containers/skopeo && make bin/skopeo
```
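With the renamed target the binary lands at `bin/skopeo` inside the source tree, so a quick check after building might look like this (assuming the commands are run from the repository root):
```sh
$ ./bin/skopeo -v
```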
### Building in a container
@@ -130,12 +130,6 @@ Building in a container is simpler, but more restrictive:
$ make binary # Or (make all) to also build documentation, see below.
```
To build a pure-Go static binary (disables devicemapper, btrfs, and gpgme):
```bash
$ make binary-static DISABLE_CGO=1
```
### Building documentation
To build the manual you will need go-md2man.


@@ -488,7 +488,7 @@ func (s *CopySuite) TestCopySimpleAtomicRegistry(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// "pull": docker: → dir:
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:amd64", "dir:"+dir1)
// "push": dir: → atomic:
@@ -509,7 +509,7 @@ func (s *CopySuite) TestCopySimple(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// "pull": docker: → dir:
assertSkopeoSucceeds(c, "", "copy", "docker://busybox", "dir:"+dir1)
// "push": dir: → docker(v2s2):
@@ -626,7 +626,7 @@ func (s *CopySuite) TestCopyEncryption(c *check.C) {
// Since the image is partially encrypted we should find layers that aren't encrypted
matchLayerBlobBinaryType(c, partiallyEncryptedImgDir+"/blobs/sha256", "application/x-gzip", 2)
// Decrypt the partically encrypted image
// Decrypt the partially encrypted image
assertSkopeoSucceeds(c, "", "copy", "--decryption-key", keysDir+"/private.key",
"oci:"+partiallyEncryptedImgDir+":encrypted", "oci:"+partiallyDecryptedImgDir+":decrypted")
@@ -720,7 +720,7 @@ func (s *CopySuite) TestCopyStreaming(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// streaming: docker: → atomic:
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "docker://estesp/busybox:amd64", "atomic:localhost:5000/myns/unsigned:streaming")
// Compare (copies of) the original and the copy:
@@ -1087,7 +1087,7 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
defer os.Remove(policy)
// We use X-R-S-S for this testing to avoid having to deal with the sigstores.
// A downside is that OpenShift records signatures per image, so the error messsages below
// A downside is that OpenShift records signatures per image, so the error messages below
// list all signatures for other tags used for the same image as well.
// So, make sure to never create a signature that could be considered valid in a different part of the test (i.e. don't reuse tags).
@@ -1103,7 +1103,7 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
// Sign the image for the mirror
assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by", "personal@example.com", regPrefix+"primary:unsigned", regPrefix+"mirror:mirror-signed")
// Verify that a correctly signed image for the mirror is acessible using the mirror's reference
// Verify that a correctly signed image for the mirror is accessible using the mirror's reference
assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"mirror:mirror-signed", dirDest)
// … but verify that while it is accessible using the primary location redirecting to the mirror, …
assertSkopeoSucceeds(c, "" /* no --policy */, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:mirror-signed", dirDest)


@@ -19,7 +19,7 @@ to start a container, then within the container:
SKOPEO_CONTAINER_TESTS=1 PS1='nested> ' go test -tags openshift_shell -timeout=24h ./integration -v -check.v -check.vv -check.f='CopySuite.TestRunShell'
An example of what can be done within the container:
cd ..; make binary-local install
cd ..; make bin/skopeo install
./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://busybox:latest atomic:localhost:5000/myns/personal:personal
oc get istag personal:personal -o json
curl -L -v 'http://localhost:5000/v2/'


@@ -94,7 +94,7 @@ func (s *SyncSuite) TestDocker2DirTagged(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := "busybox:latest"
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
c.Assert(err, check.IsNil)
@@ -118,7 +118,7 @@ func (s *SyncSuite) TestDocker2DirTagged(c *check.C) {
}
func (s *SyncSuite) TestScoped(c *check.C) {
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := "busybox:latest"
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
c.Assert(err, check.IsNil)
@@ -138,7 +138,7 @@ func (s *SyncSuite) TestScoped(c *check.C) {
}
func (s *SyncSuite) TestDirIsNotOverwritten(c *check.C) {
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := "busybox:latest"
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
c.Assert(err, check.IsNil)
@@ -173,7 +173,7 @@ func (s *SyncSuite) TestDocker2DirUntagged(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := "alpine"
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
c.Assert(err, check.IsNil)
@@ -350,7 +350,7 @@ func (s *SyncSuite) TestYamlTLSVerify(c *check.C) {
image := "busybox"
tag := "latest"
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// copy docker => docker
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://"+image+":"+tag, localRegURL+image+":"+tag)
@@ -403,7 +403,7 @@ func (s *SyncSuite) TestDocker2DockerTagged(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := "busybox:latest"
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
c.Assert(err, check.IsNil)
@@ -436,7 +436,7 @@ func (s *SyncSuite) TestDir2DockerTagged(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := "busybox:latest"
dir1 := path.Join(tmpDir, "dir1")

nix/default.nix (new file)

@@ -0,0 +1,57 @@
{ system ? builtins.currentSystem }:
let
pkgs = (import ./nixpkgs.nix {
config = {
packageOverrides = pkg: {
gpgme = (static pkg.gpgme);
libassuan = (static pkg.libassuan);
libgpgerror = (static pkg.libgpgerror);
libseccomp = (static pkg.libseccomp);
glib = (static pkg.glib).overrideAttrs(x: {
outputs = [ "bin" "out" "dev" ];
mesonFlags = [
"-Ddefault_library=static"
"-Ddevbindir=${placeholder ''dev''}/bin"
"-Dgtk_doc=false"
"-Dnls=disabled"
];
});
};
};
});
static = pkg: pkg.overrideAttrs(x: {
doCheck = false;
configureFlags = (x.configureFlags or []) ++ [
"--without-shared"
"--disable-shared"
];
dontDisableStatic = true;
enableSharedExecutables = false;
enableStatic = true;
});
self = with pkgs; buildGoModule rec {
name = "skopeo";
src = ./..;
vendorSha256 = null;
doCheck = false;
enableParallelBuilding = true;
outputs = [ "out" ];
nativeBuildInputs = [ bash git go-md2man installShellFiles makeWrapper pkg-config which ];
buildInputs = [ glibc glibc.static gpgme libassuan libgpgerror libseccomp ];
prePatch = ''
export CFLAGS='-static'
export LDFLAGS='-s -w -static-libgcc -static'
export EXTRA_LDFLAGS='-s -w -linkmode external -extldflags "-static -lm"'
export BUILDTAGS='static netgo exclude_graphdriver_btrfs exclude_graphdriver_devicemapper'
'';
buildPhase = ''
patchShebangs .
make bin/skopeo
'';
installPhase = ''
install -Dm755 bin/skopeo $out/bin/skopeo
'';
};
in self
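This derivation takes only an optional `system` argument, so it can be evaluated directly with `nix-build`; the commands below are a hedged sketch (the repository may also wrap this in a build script or Makefile target, which is not part of this diff):
```sh
# Build the static skopeo binary against the pinned nixpkgs, from the repository root.
$ nix-build nix/
$ ./result/bin/skopeo -v
```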

nix/nixpkgs.json (new file)

@@ -0,0 +1,7 @@
{
"url": "https://github.com/nixos/nixpkgs",
"rev": "d5a689edda8219a1e20fd3871174b994cf0a94a3",
"date": "2020-09-13T01:58:20+02:00",
"sha256": "0m6nmi1fx0glfbg52kqdjgidxylk4p5xnx9v35wlsfi1j2xhkia4",
"fetchSubmodules": false
}

nix/nixpkgs.nix (new file)

@@ -0,0 +1,8 @@
let
json = builtins.fromJSON (builtins.readFile ./nixpkgs.json);
nixpkgs = import (builtins.fetchTarball {
name = "nixos-unstable";
url = "${json.url}/archive/${json.rev}.tar.gz";
inherit (json) sha256;
});
in nixpkgs
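The pin above consumes `nix/nixpkgs.json`, whose fields match the JSON emitted by `nix-prefetch-git`. A hedged sketch of refreshing it follows; the repository's actual update recipe is not shown in this diff, and the committed file keeps only a subset of the tool's output fields:
```sh
# Compute url/rev/date/sha256 for a newer nixpkgs revision (the revision is illustrative).
$ nix-prefetch-git https://github.com/nixos/nixpkgs --rev refs/heads/master
```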


@@ -1,90 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package log
import (
"context"
"sync/atomic"
"github.com/sirupsen/logrus"
)
var (
// G is an alias for GetLogger.
//
// We may want to define this locally to a package to get package tagged log
// messages.
G = GetLogger
// L is an alias for the standard logger.
L = logrus.NewEntry(logrus.StandardLogger())
)
type (
loggerKey struct{}
)
// TraceLevel is the log level for tracing. Trace level is lower than debug level,
// and is usually used to trace detailed behavior of the program.
const TraceLevel = logrus.Level(uint32(logrus.DebugLevel + 1))
// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
// ensure the formatted time is always the same number of characters.
const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
// ParseLevel takes a string level and returns the Logrus log level constant.
// It supports trace level.
func ParseLevel(lvl string) (logrus.Level, error) {
if lvl == "trace" {
return TraceLevel, nil
}
return logrus.ParseLevel(lvl)
}
// WithLogger returns a new context with the provided logger. Use in
// combination with logger.WithField(s) for great effect.
func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
return context.WithValue(ctx, loggerKey{}, logger)
}
// GetLogger retrieves the current logger from the context. If no logger is
// available, the default logger is returned.
func GetLogger(ctx context.Context) *logrus.Entry {
logger := ctx.Value(loggerKey{})
if logger == nil {
return L
}
return logger.(*logrus.Entry)
}
// Trace logs a message at level Trace with the log entry passed-in.
func Trace(e *logrus.Entry, args ...interface{}) {
level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level)))
if level >= TraceLevel {
e.Debug(args...)
}
}
// Tracef logs a message at level Trace with the log entry passed-in.
func Tracef(e *logrus.Entry, format string, args ...interface{}) {
level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level)))
if level >= TraceLevel {
e.Debugf(format, args...)
}
}
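(For context on the vendored logging helpers being removed above, here is a minimal usage sketch of the context-scoped logger they provide; the "module"/"example" field values are illustrative placeholders, not taken from this repository.)

package main

import (
	"context"

	"github.com/containerd/containerd/log"
	"github.com/sirupsen/logrus"
)

func main() {
	// Attach a tagged logger to a context...
	entry := logrus.NewEntry(logrus.StandardLogger()).WithField("module", "example")
	ctx := log.WithLogger(context.Background(), entry)
	// ...and retrieve it anywhere downstream via the G alias for GetLogger.
	log.G(ctx).Info("hello from the context-scoped logger")
}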


@@ -1,229 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package platforms
import specs "github.com/opencontainers/image-spec/specs-go/v1"
// MatchComparer is able to match and compare platforms to
// filter and sort platforms.
type MatchComparer interface {
Matcher
Less(specs.Platform, specs.Platform) bool
}
// Only returns a match comparer for a single platform
// using default resolution logic for the platform.
//
// For ARMv8, will also match ARMv7, ARMv6 and ARMv5 (for 32bit runtimes)
// For ARMv7, will also match ARMv6 and ARMv5
// For ARMv6, will also match ARMv5
func Only(platform specs.Platform) MatchComparer {
platform = Normalize(platform)
if platform.Architecture == "arm" {
if platform.Variant == "v8" {
return orderedPlatformComparer{
matchers: []Matcher{
&matcher{
Platform: platform,
},
&matcher{
Platform: specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v7",
},
},
&matcher{
Platform: specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v6",
},
},
&matcher{
Platform: specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v5",
},
},
},
}
}
if platform.Variant == "v7" {
return orderedPlatformComparer{
matchers: []Matcher{
&matcher{
Platform: platform,
},
&matcher{
Platform: specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v6",
},
},
&matcher{
Platform: specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v5",
},
},
},
}
}
if platform.Variant == "v6" {
return orderedPlatformComparer{
matchers: []Matcher{
&matcher{
Platform: platform,
},
&matcher{
Platform: specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v5",
},
},
},
}
}
}
return singlePlatformComparer{
Matcher: &matcher{
Platform: platform,
},
}
}
// Ordered returns a platform MatchComparer which matches any of the platforms
// but orders them in order they are provided.
func Ordered(platforms ...specs.Platform) MatchComparer {
matchers := make([]Matcher, len(platforms))
for i := range platforms {
matchers[i] = NewMatcher(platforms[i])
}
return orderedPlatformComparer{
matchers: matchers,
}
}
// Any returns a platform MatchComparer which matches any of the platforms
// with no preference for ordering.
func Any(platforms ...specs.Platform) MatchComparer {
matchers := make([]Matcher, len(platforms))
for i := range platforms {
matchers[i] = NewMatcher(platforms[i])
}
return anyPlatformComparer{
matchers: matchers,
}
}
// All is a platform MatchComparer which matches all platforms
// with preference for ordering.
var All MatchComparer = allPlatformComparer{}
type singlePlatformComparer struct {
Matcher
}
func (c singlePlatformComparer) Less(p1, p2 specs.Platform) bool {
return c.Match(p1) && !c.Match(p2)
}
type orderedPlatformComparer struct {
matchers []Matcher
}
func (c orderedPlatformComparer) Match(platform specs.Platform) bool {
for _, m := range c.matchers {
if m.Match(platform) {
return true
}
}
return false
}
func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool {
for _, m := range c.matchers {
p1m := m.Match(p1)
p2m := m.Match(p2)
if p1m && !p2m {
return true
}
if p1m || p2m {
return false
}
}
return false
}
type anyPlatformComparer struct {
matchers []Matcher
}
func (c anyPlatformComparer) Match(platform specs.Platform) bool {
for _, m := range c.matchers {
if m.Match(platform) {
return true
}
}
return false
}
func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool {
var p1m, p2m bool
for _, m := range c.matchers {
if !p1m && m.Match(p1) {
p1m = true
}
if !p2m && m.Match(p2) {
p2m = true
}
if p1m && p2m {
return false
}
}
// If one matches and the other does not, sort the match first
return p1m && !p2m
}
type allPlatformComparer struct{}
func (allPlatformComparer) Match(specs.Platform) bool {
return true
}
func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool {
return false
}


@@ -1,117 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package platforms
import (
"bufio"
"os"
"runtime"
"strings"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log"
"github.com/pkg/errors"
)
// Present the ARM instruction set architecture, eg: v7, v8
var cpuVariant string
func init() {
if isArmArch(runtime.GOARCH) {
cpuVariant = getCPUVariant()
} else {
cpuVariant = ""
}
}
// For Linux, the kernel has already detected the ABI, ISA and Features.
// So we don't need to access the ARM registers to detect platform information
// by ourselves. We can just parse this information from /proc/cpuinfo
func getCPUInfo(pattern string) (info string, err error) {
if !isLinuxOS(runtime.GOOS) {
return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS)
}
cpuinfo, err := os.Open("/proc/cpuinfo")
if err != nil {
return "", err
}
defer cpuinfo.Close()
// Start to parse the Cpuinfo line by line. For SMP SoC, parsing
// the first core is enough.
scanner := bufio.NewScanner(cpuinfo)
for scanner.Scan() {
newline := scanner.Text()
list := strings.Split(newline, ":")
if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
return strings.TrimSpace(list[1]), nil
}
}
// Check whether the scanner encountered errors
err = scanner.Err()
if err != nil {
return "", err
}
return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern)
}
func getCPUVariant() string {
if runtime.GOOS == "windows" {
// Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use
// runtime.GOARCH to determine the variants
var variant string
switch runtime.GOARCH {
case "arm64":
variant = "v8"
case "arm":
variant = "v7"
default:
variant = "unknown"
}
return variant
}
variant, err := getCPUInfo("Cpu architecture")
if err != nil {
log.L.WithError(err).Error("failure getting variant")
return ""
}
switch variant {
case "8", "AArch64":
variant = "v8"
case "7", "7M", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
variant = "v7"
case "6", "6TEJ":
variant = "v6"
case "5", "5T", "5TE", "5TEJ":
variant = "v5"
case "4", "4T":
variant = "v4"
case "3":
variant = "v3"
default:
variant = "unknown"
}
return variant
}


@@ -1,114 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package platforms
import (
"runtime"
"strings"
)
// isLinuxOS returns true if the operating system is Linux.
//
// The OS value should be normalized before calling this function.
func isLinuxOS(os string) bool {
return os == "linux"
}
// These functions are generated from https://golang.org/src/go/build/syslist.go.
//
// We use switch statements because they are slightly faster than map lookups
// and use a little less memory.
// isKnownOS returns true if we know about the operating system.
//
// The OS value should be normalized before calling this function.
func isKnownOS(os string) bool {
switch os {
case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
return true
}
return false
}
// isArmArch returns true if the architecture is ARM.
//
// The arch value should be normalized before being passed to this function.
func isArmArch(arch string) bool {
switch arch {
case "arm", "arm64":
return true
}
return false
}
// isKnownArch returns true if we know about the architecture.
//
// The arch value should be normalized before being passed to this function.
func isKnownArch(arch string) bool {
switch arch {
case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm":
return true
}
return false
}
func normalizeOS(os string) string {
if os == "" {
return runtime.GOOS
}
os = strings.ToLower(os)
switch os {
case "macos":
os = "darwin"
}
return os
}
// normalizeArch normalizes the architecture.
func normalizeArch(arch, variant string) (string, string) {
arch, variant = strings.ToLower(arch), strings.ToLower(variant)
switch arch {
case "i386":
arch = "386"
variant = ""
case "x86_64", "x86-64":
arch = "amd64"
variant = ""
case "aarch64", "arm64":
arch = "arm64"
switch variant {
case "8", "v8":
variant = ""
}
case "armhf":
arch = "arm"
variant = "v7"
case "armel":
arch = "arm"
variant = "v6"
case "arm":
switch variant {
case "", "7":
variant = "v7"
case "5", "6", "8":
variant = "v" + variant
}
}
return arch, variant
}


@@ -1,38 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package platforms
import (
"runtime"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)
// DefaultString returns the default string specifier for the platform.
func DefaultString() string {
return Format(DefaultSpec())
}
// DefaultSpec returns the current platform's default platform specification.
func DefaultSpec() specs.Platform {
return specs.Platform{
OS: runtime.GOOS,
Architecture: runtime.GOARCH,
// The Variant field will be empty if arch != ARM.
Variant: cpuVariant,
}
}


@@ -1,24 +0,0 @@
// +build !windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package platforms
// Default returns the default matcher for the platform.
func Default() MatchComparer {
return Only(DefaultSpec())
}


@@ -1,31 +0,0 @@
// +build windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package platforms
import (
specs "github.com/opencontainers/image-spec/specs-go/v1"
)
// Default returns the default matcher for the platform.
func Default() MatchComparer {
return Ordered(DefaultSpec(), specs.Platform{
OS: "linux",
Architecture: "amd64",
})
}


@@ -1,279 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package platforms provides a toolkit for normalizing, matching and
// specifying container platforms.
//
// Centered around OCI platform specifications, we define a string-based
// specifier syntax that can be used for user input. With a specifier, users
// only need to specify the parts of the platform that are relevant to their
// context, providing an operating system or architecture or both.
//
// How do I use this package?
//
// The vast majority of use cases should simply use the match function with
// user input. The first step is to parse a specifier into a matcher:
//
// m, err := Parse("linux")
// if err != nil { ... }
//
// Once you have a matcher, use it to match against the platform declared by a
// component, typically from an image or runtime. Since extracting an images
// platform is a little more involved, we'll use an example against the
// platform default:
//
// if ok := m.Match(Default()); !ok { /* doesn't match */ }
//
// This can be composed in loops for resolving runtimes or used as a filter for
// fetch and select images.
//
// More details of the specifier syntax and platform spec follow.
//
// Declaring Platform Support
//
// Components that have strict platform requirements should use the OCI
// platform specification to declare their support. Typically, this will be
// images and runtimes, which should declare specifically which platform they
// support. This looks roughly as follows:
//
// type Platform struct {
// Architecture string
// OS string
// Variant string
// }
//
// Most images and runtimes should at least set Architecture and OS, according
// to their GOARCH and GOOS values, respectively (follow the OCI image
// specification when in doubt). ARM should set the variant under certain
// circumstances, which are outlined below.
//
// Platform Specifiers
//
// While the OCI platform specifications provide a tool for components to
// specify structured information, user input typically doesn't need the full
// context and much can be inferred. To solve this problem, we introduced
// "specifiers". A specifier has the format
// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
// operating system or the architecture or both.
//
// An example of a common specifier is `linux/amd64`. If the host has a default
// runtime that matches this, the user can simply provide the component that
// matters. For example, if an image provides amd64 and arm64 support, the
// operating system, `linux` can be inferred, so they only have to provide
// `arm64` or `amd64`. Similar behavior is implemented for operating systems,
// where the architecture may be known but a runtime may support images from
// different operating systems.
//
// Normalization
//
// Because not all users are familiar with the way the Go runtime represents
// platforms, several normalizations have been provided to make this package
// easier to use.
//
// The following are performed for architectures:
//
// Value Normalized
// aarch64 arm64
// armhf arm
// armel arm/v6
// i386 386
// x86_64 amd64
// x86-64 amd64
//
// We also normalize the operating system `macos` to `darwin`.
//
// ARM Support
//
// To qualify ARM architecture, the Variant field is used to qualify the arm
// version. The most common arm version, v7, is represented without the variant
// unless it is explicitly provided. This is treated as equivalent to armhf. A
// previous architecture, armel, will be normalized to arm/v6.
//
// While these normalizations are provided, their support on arm platforms has
// not yet been fully implemented and tested.
package platforms
import (
"regexp"
"runtime"
"strconv"
"strings"
"github.com/containerd/containerd/errdefs"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
var (
specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`)
)
// Matcher matches platforms specifications, provided by an image or runtime.
type Matcher interface {
Match(platform specs.Platform) bool
}
// NewMatcher returns a simple matcher based on the provided platform
// specification. The returned matcher only looks for equality based on os,
// architecture and variant.
//
// One may implement their own matcher if this doesn't provide the required
// functionality.
//
// Applications should opt to use `Match` over directly parsing specifiers.
func NewMatcher(platform specs.Platform) Matcher {
return &matcher{
Platform: Normalize(platform),
}
}
type matcher struct {
specs.Platform
}
func (m *matcher) Match(platform specs.Platform) bool {
normalized := Normalize(platform)
return m.OS == normalized.OS &&
m.Architecture == normalized.Architecture &&
m.Variant == normalized.Variant
}
func (m *matcher) String() string {
return Format(m.Platform)
}
// Parse parses the platform specifier syntax into a platform declaration.
//
// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`.
// The minimum required information for a platform specifier is the operating
// system or architecture. If there is only a single string (no slashes), the
// value will be matched against the known set of operating systems, then fall
// back to the known set of architectures. The missing component will be
// inferred based on the local environment.
func Parse(specifier string) (specs.Platform, error) {
if strings.Contains(specifier, "*") {
// TODO(stevvooe): need to work out exact wildcard handling
return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier)
}
parts := strings.Split(specifier, "/")
for _, part := range parts {
if !specifierRe.MatchString(part) {
return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String())
}
}
var p specs.Platform
switch len(parts) {
case 1:
// in this case, we will test that the value might be an OS, then look
// it up. If it is not known, we'll treat it as an architecture. Since
// we have very little information about the platform here, we are
// going to be a little more strict if we don't know about the argument
// value.
p.OS = normalizeOS(parts[0])
if isKnownOS(p.OS) {
// picks a default architecture
p.Architecture = runtime.GOARCH
if p.Architecture == "arm" {
// TODO(stevvooe): Resolve arm variant, if not v6 (default)
return specs.Platform{}, errors.Wrapf(errdefs.ErrNotImplemented, "arm support not fully implemented")
}
return p, nil
}
p.Architecture, p.Variant = normalizeArch(parts[0], "")
if p.Architecture == "arm" && p.Variant == "v7" {
p.Variant = ""
}
if isKnownArch(p.Architecture) {
p.OS = runtime.GOOS
return p, nil
}
return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier)
case 2:
// In this case, we treat as a regular os/arch pair. We don't care
// about whether or not we know of the platform.
p.OS = normalizeOS(parts[0])
p.Architecture, p.Variant = normalizeArch(parts[1], "")
if p.Architecture == "arm" && p.Variant == "v7" {
p.Variant = ""
}
return p, nil
case 3:
// we have a fully specified variant, this is rare
p.OS = normalizeOS(parts[0])
p.Architecture, p.Variant = normalizeArch(parts[1], parts[2])
if p.Architecture == "arm64" && p.Variant == "" {
p.Variant = "v8"
}
return p, nil
}
return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier)
}
// MustParse is like Parse but panics if the specifier cannot be parsed.
// Simplifies initialization of global variables.
func MustParse(specifier string) specs.Platform {
p, err := Parse(specifier)
if err != nil {
panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error())
}
return p
}
// Format returns a string specifier from the provided platform specification.
func Format(platform specs.Platform) string {
if platform.OS == "" {
return "unknown"
}
return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant)
}
func joinNotEmpty(s ...string) string {
var ss []string
for _, s := range s {
if s == "" {
continue
}
ss = append(ss, s)
}
return strings.Join(ss, "/")
}
// Normalize validates and translates the platform to the canonical value.
//
// For example, if "Aarch64" is encountered, we change it to "arm64" or if
// "x86_64" is encountered, it becomes "amd64".
func Normalize(platform specs.Platform) specs.Platform {
platform.OS = normalizeOS(platform.OS)
platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant)
// these fields are deprecated, remove them
platform.OSFeatures = nil
platform.OSVersion = ""
return platform
}
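(The package comment above describes the platform specifier syntax; as a quick illustration of the API in this vendored copy being dropped, a minimal sketch follows. The specifier string is illustrative and outputs depend on the host platform.)

package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
)

func main() {
	// Parse a user-facing specifier into a structured OCI platform.
	p, err := platforms.Parse("linux/arm64")
	if err != nil {
		panic(err)
	}
	fmt.Println(platforms.Format(p)) // "linux/arm64"

	// Build a matcher and test the local default platform against it.
	m := platforms.NewMatcher(p)
	fmt.Println(m.Match(platforms.DefaultSpec())) // whether the local platform is linux/arm64
}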


@@ -40,8 +40,8 @@ func CheckAuthFile(authfile string) error {
// data with the original parameter.
func systemContextWithOptions(sys *types.SystemContext, authFile, certDir string) *types.SystemContext {
if sys != nil {
copy := *sys
sys = &copy
sysCopy := *sys
sys = &sysCopy
} else {
sys = &types.SystemContext{}
}
@@ -126,7 +126,7 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
if err = docker.CheckAuth(ctx, systemContext, username, password, server); err == nil {
// Write the new credentials to the authfile
if err = config.SetAuthentication(systemContext, server, username, password); err != nil {
if err := config.SetAuthentication(systemContext, server, username, password); err != nil {
return err
}
}
@@ -150,17 +150,13 @@ func getRegistryName(server string) string {
// gets the registry from the input. If the input is of the form
// quay.io/myuser/myimage, it will parse it and just return quay.io
split := strings.Split(server, "/")
if len(split) > 1 {
return split[0]
}
return split[0]
}
// getUserAndPass gets the username and password from STDIN if not given
// using the -u and -p flags. If the username prompt is left empty, the
// displayed userFromAuthFile will be used instead.
func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (string, string, error) {
var err error
func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user, pass string, err error) {
reader := bufio.NewReader(opts.Stdin)
username := opts.Username
if username == "" {

vendor/github.com/containers/common/pkg/retry/retry.go generated vendored Normal file

@@ -0,0 +1,95 @@
package retry
import (
"context"
"io"
"math"
"net"
"net/url"
"syscall"
"time"
"github.com/docker/distribution/registry/api/errcode"
errcodev2 "github.com/docker/distribution/registry/api/v2"
"github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// RetryOptions defines the option to retry
type RetryOptions struct {
MaxRetry int // The number of times to possibly retry
Delay time.Duration // The delay to use between retries, if set
}
// RetryIfNecessary retries the operation in exponential backoff with the retryOptions
func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions *RetryOptions) error {
err := operation()
for attempt := 0; err != nil && isRetryable(err) && attempt < retryOptions.MaxRetry; attempt++ {
delay := time.Duration(int(math.Pow(2, float64(attempt)))) * time.Second
if retryOptions.Delay != 0 {
delay = retryOptions.Delay
}
logrus.Infof("Warning: failed, retrying in %s ... (%d/%d)", delay, attempt+1, retryOptions.MaxRetry)
select {
case <-time.After(delay):
break
case <-ctx.Done():
return err
}
err = operation()
}
return err
}
func isRetryable(err error) bool {
err = errors.Cause(err)
if err == context.Canceled || err == context.DeadlineExceeded {
return false
}
type unwrapper interface {
Unwrap() error
}
switch e := err.(type) {
case errcode.Error:
switch e.Code {
case errcode.ErrorCodeUnauthorized, errcodev2.ErrorCodeNameUnknown, errcodev2.ErrorCodeManifestUnknown:
return false
}
return true
case *net.OpError:
return isRetryable(e.Err)
case *url.Error: // This includes errors returned by the net/http client.
if e.Err == io.EOF { // Happens when a server accepts a HTTP connection and sends EOF
return true
}
return isRetryable(e.Err)
case syscall.Errno:
return e != syscall.ECONNREFUSED
case errcode.Errors:
// if this error is a group of errors, process them all in turn
for i := range e {
if !isRetryable(e[i]) {
return false
}
}
return true
case *multierror.Error:
// if this error is a group of errors, process them all in turn
for i := range e.Errors {
if !isRetryable(e.Errors[i]) {
return false
}
}
return true
case unwrapper:
err = e.Unwrap()
return isRetryable(err)
}
return false
}
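(A minimal, self-contained sketch of how a caller might use the new RetryIfNecessary helper above; fetchManifest is a hypothetical stand-in for any operation that can fail transiently, not a function from this repository.)

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/containers/common/pkg/retry"
)

// fetchManifest is a hypothetical stand-in for an operation that may fail transiently.
func fetchManifest(ctx context.Context) error {
	return errors.New("temporary failure")
}

func main() {
	ctx := context.Background()
	opts := retry.RetryOptions{MaxRetry: 3, Delay: 2 * time.Second}
	err := retry.RetryIfNecessary(ctx, func() error {
		return fetchManifest(ctx)
	}, &opts)
	// err is nil on success, or the last error once retries are exhausted
	// (or immediately, if the error is not considered retryable).
	fmt.Println(err)
}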


@@ -377,7 +377,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
if len(sigs) != 0 {
c.Printf("Checking if image list destination supports signatures\n")
if err := c.dest.SupportsSignatures(ctx); err != nil {
return nil, "", errors.Wrap(err, "Can not copy signatures")
return nil, "", errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
}
}
canModifyManifestList := (len(sigs) == 0)
@@ -595,7 +595,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
if len(sigs) != 0 {
c.Printf("Checking if image destination supports signatures\n")
if err := c.dest.SupportsSignatures(ctx); err != nil {
return nil, "", "", errors.Wrap(err, "Can not copy signatures")
return nil, "", "", errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
}
}


@@ -3,9 +3,8 @@ package archive
import (
"context"
"io"
"os"
"github.com/containers/image/v5/docker/tarfile"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
@@ -13,37 +12,38 @@ import (
type archiveImageDestination struct {
*tarfile.Destination // Implements most of types.ImageDestination
ref archiveReference
writer io.Closer
archive *tarfile.Writer // Should only be closed if writer != nil
writer io.Closer // May be nil if the archive is shared
}
func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.ImageDestination, error) {
// ref.path can be either a pipe or a regular file
// in the case of a pipe, we require that we can open it for write
// in the case of a regular file, we don't want to overwrite any pre-existing file
// so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
// only in a different way. Either way, it's up to the user not to have two writers to the same path.)
fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return nil, errors.Wrapf(err, "error opening file %q", ref.path)
if ref.sourceIndex != -1 {
return nil, errors.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
}
fhStat, err := fh.Stat()
if err != nil {
return nil, errors.Wrapf(err, "error statting file %q", ref.path)
}
var archive *tarfile.Writer
var writer io.Closer
if ref.archiveWriter != nil {
archive = ref.archiveWriter
writer = nil
} else {
fh, err := openArchiveForWriting(ref.path)
if err != nil {
return nil, err
}
if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
return nil, errors.New("docker-archive doesn't support modifying existing images")
archive = tarfile.NewWriter(fh)
writer = fh
}
tarDest := tarfile.NewDestinationWithContext(sys, fh, ref.destinationRef)
tarDest := tarfile.NewDestination(sys, archive, ref.ref)
if sys != nil && sys.DockerArchiveAdditionalTags != nil {
tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
}
return &archiveImageDestination{
Destination: tarDest,
ref: ref,
writer: fh,
archive: archive,
writer: writer,
}, nil
}
@@ -60,7 +60,10 @@ func (d *archiveImageDestination) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *archiveImageDestination) Close() error {
return d.writer.Close()
if d.writer != nil {
return d.writer.Close()
}
return nil
}
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
@@ -68,5 +71,8 @@ func (d *archiveImageDestination) Close() error {
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
return d.Destination.Commit(ctx)
if d.writer != nil {
return d.archive.Close()
}
return nil
}


@@ -0,0 +1,120 @@
package archive
import (
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
// Reader manages a single Docker archive, allows listing its contents and accessing
// individual images with less overhead than creating image references individually
// (because the archive is, if necessary, copied or decompressed only once).
type Reader struct {
path string // The original, user-specified path; not the maintained temporary file, if any
archive *tarfile.Reader
}
// NewReader returns a Reader for path.
// The caller should call .Close() on the returned object.
func NewReader(sys *types.SystemContext, path string) (*Reader, error) {
archive, err := tarfile.NewReaderFromFile(sys, path)
if err != nil {
return nil, err
}
return &Reader{
path: path,
archive: archive,
}, nil
}
// Close deletes temporary files associated with the Reader, if any.
func (r *Reader) Close() error {
return r.archive.Close()
}
// NewReaderForReference creates a Reader from a Reader-independent imageReference, which must be from docker/archive.Transport,
// and a variant of imageReference that points at the same image within the reader.
// The caller should call .Close() on the returned Reader.
func NewReaderForReference(sys *types.SystemContext, ref types.ImageReference) (*Reader, types.ImageReference, error) {
standalone, ok := ref.(archiveReference)
if !ok {
return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
}
if standalone.archiveReader != nil {
return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a reader-bound reference %s", standalone.StringWithinTransport())
}
reader, err := NewReader(sys, standalone.path)
if err != nil {
return nil, nil, err
}
succeeded := false
defer func() {
if !succeeded {
reader.Close()
}
}()
readerRef, err := newReference(standalone.path, standalone.ref, standalone.sourceIndex, reader.archive, nil)
if err != nil {
return nil, nil, err
}
succeeded = true
return reader, readerRef, nil
}
// List returns the set of references for images in the Reader,
// grouped by the image the references point to.
// The references are valid only until the Reader is closed.
func (r *Reader) List() ([][]types.ImageReference, error) {
res := [][]types.ImageReference{}
for imageIndex, image := range r.archive.Manifest {
refs := []types.ImageReference{}
for _, tag := range image.RepoTags {
parsedTag, err := reference.ParseNormalizedNamed(tag)
if err != nil {
return nil, errors.Wrapf(err, "Invalid tag %#v in manifest item @%d", tag, imageIndex)
}
nt, ok := parsedTag.(reference.NamedTagged)
if !ok {
return nil, errors.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String())
}
ref, err := newReference(r.path, nt, -1, r.archive, nil)
if err != nil {
return nil, errors.Wrapf(err, "Error creating a reference for tag %#v in manifest item @%d", tag, imageIndex)
}
refs = append(refs, ref)
}
if len(refs) == 0 {
ref, err := newReference(r.path, nil, imageIndex, r.archive, nil)
if err != nil {
return nil, errors.Wrapf(err, "Error creating a reference for manifest item @%d", imageIndex)
}
refs = append(refs, ref)
}
res = append(res, refs)
}
return res, nil
}
// ManifestTagsForReference returns the set of tags “matching” ref in reader, as strings
// (i.e. exposing the short names before normalization).
// The function reports an error if ref does not identify a single image.
// If ref contains a NamedTagged reference, only a single tag “matching” ref is returned;
// If ref contains a source index, or neither a NamedTagged nor a source index, all tags
// matching the image are returned.
// Almost all users should use List() or ImageReference.DockerReference() instead.
func (r *Reader) ManifestTagsForReference(ref types.ImageReference) ([]string, error) {
archiveRef, ok := ref.(archiveReference)
if !ok {
return nil, errors.Errorf("Internal error: ManifestTagsForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
}
manifestItem, tagIndex, err := r.archive.ChooseManifestItem(archiveRef.ref, archiveRef.sourceIndex)
if err != nil {
return nil, err
}
if tagIndex != -1 {
return []string{manifestItem.RepoTags[tagIndex]}, nil
}
return manifestItem.RepoTags, nil
}
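(A minimal sketch of iterating over the images in a docker-archive tarball with the new Reader API above; the path and the nil SystemContext are illustrative assumptions.)

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/archive"
	"github.com/containers/image/v5/transports"
)

func main() {
	reader, err := archive.NewReader(nil, "/tmp/images.tar") // nil SystemContext uses defaults
	if err != nil {
		panic(err)
	}
	defer reader.Close()

	images, err := reader.List()
	if err != nil {
		panic(err)
	}
	// Each inner slice groups the references that point at the same image.
	for i, refs := range images {
		for _, ref := range refs {
			fmt.Printf("image %d: %s\n", i, transports.ImageName(ref))
		}
	}
}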


@@ -3,9 +3,8 @@ package archive
import (
"context"
"github.com/containers/image/v5/docker/tarfile"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/types"
"github.com/sirupsen/logrus"
)
type archiveImageSource struct {
@@ -16,13 +15,20 @@ type archiveImageSource struct {
// newImageSource returns a types.ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (types.ImageSource, error) {
if ref.destinationRef != nil {
logrus.Warnf("docker-archive: references are not supported for sources (ignoring)")
}
src, err := tarfile.NewSourceFromFileWithContext(sys, ref.path)
if err != nil {
return nil, err
var archive *tarfile.Reader
var closeArchive bool
if ref.archiveReader != nil {
archive = ref.archiveReader
closeArchive = false
} else {
a, err := tarfile.NewReaderFromFile(sys, ref.path)
if err != nil {
return nil, err
}
archive = a
closeArchive = true
}
src := tarfile.NewSource(archive, closeArchive, ref.ref, ref.sourceIndex)
return &archiveImageSource{
Source: src,
ref: ref,


@@ -3,8 +3,10 @@ package archive
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
ctrImage "github.com/containers/image/v5/image"
"github.com/containers/image/v5/transports"
@@ -42,9 +44,16 @@ func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
// archiveReference is an ImageReference for Docker images.
type archiveReference struct {
path string
// only used for destinations,
// archiveReference.destinationRef is optional and can be nil for destinations as well.
destinationRef reference.NamedTagged
// May be nil to read the only image in an archive, or to create an untagged image.
ref reference.NamedTagged
// If not -1, a zero-based index of the image in the manifest. Valid only for sources.
// Must not be set if ref is set.
sourceIndex int
// If not nil, must have been created from path (but archiveReader.path may point at a temporary
// file, not necessarily path precisely).
archiveReader *tarfile.Reader
// If not nil, must have been created for path
archiveWriter *tarfile.Writer
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
@@ -55,37 +64,69 @@ func ParseReference(refString string) (types.ImageReference, error) {
parts := strings.SplitN(refString, ":", 2)
path := parts[0]
var destinationRef reference.NamedTagged
var nt reference.NamedTagged
sourceIndex := -1
// A :tag was specified, which is only necessary for destinations.
if len(parts) == 2 {
ref, err := reference.ParseNormalizedNamed(parts[1])
if err != nil {
return nil, errors.Wrapf(err, "docker-archive parsing reference")
// A :tag or :@index was specified.
if len(parts[1]) > 0 && parts[1][0] == '@' {
i, err := strconv.Atoi(parts[1][1:])
if err != nil {
return nil, errors.Wrapf(err, "Invalid source index %s", parts[1])
}
if i < 0 {
return nil, errors.Errorf("Invalid source index @%d: must not be negative", i)
}
sourceIndex = i
} else {
ref, err := reference.ParseNormalizedNamed(parts[1])
if err != nil {
return nil, errors.Wrapf(err, "docker-archive parsing reference")
}
ref = reference.TagNameOnly(ref)
refTagged, isTagged := ref.(reference.NamedTagged)
if !isTagged { // If ref contains a digest, TagNameOnly does not change it
return nil, errors.Errorf("reference does not include a tag: %s", ref.String())
}
nt = refTagged
}
ref = reference.TagNameOnly(ref)
refTagged, isTagged := ref.(reference.NamedTagged)
if !isTagged {
// Really shouldn't be hit...
return nil, errors.Errorf("internal error: reference is not tagged even after reference.TagNameOnly: %s", refString)
}
destinationRef = refTagged
}
return NewReference(path, destinationRef)
return newReference(path, nt, sourceIndex, nil, nil)
}
// NewReference rethrns a Docker archive reference for a path and an optional destination reference.
func NewReference(path string, destinationRef reference.NamedTagged) (types.ImageReference, error) {
// NewReference returns a Docker archive reference for a path and an optional reference.
func NewReference(path string, ref reference.NamedTagged) (types.ImageReference, error) {
return newReference(path, ref, -1, nil, nil)
}
// NewIndexReference returns a Docker archive reference for a path and a zero-based source manifest index.
func NewIndexReference(path string, sourceIndex int) (types.ImageReference, error) {
return newReference(path, nil, sourceIndex, nil, nil)
}
// newReference returns a docker archive reference for a path, an optional reference or sourceIndex,
// and optionally a tarfile.Reader and/or a tarfile.Writer matching path.
func newReference(path string, ref reference.NamedTagged, sourceIndex int,
archiveReader *tarfile.Reader, archiveWriter *tarfile.Writer) (types.ImageReference, error) {
if strings.Contains(path, ":") {
return nil, errors.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path)
}
if _, isDigest := destinationRef.(reference.Canonical); isDigest {
return nil, errors.Errorf("docker-archive doesn't support digest references: %s", destinationRef.String())
if ref != nil && sourceIndex != -1 {
return nil, errors.Errorf("Invalid docker-archive: reference: cannot use both a tag and a source index")
}
if _, isDigest := ref.(reference.Canonical); isDigest {
return nil, errors.Errorf("docker-archive doesn't support digest references: %s", ref.String())
}
if sourceIndex != -1 && sourceIndex < 0 {
return nil, errors.Errorf("Invalid docker-archive: reference: index @%d must not be negative", sourceIndex)
}
return archiveReference{
path: path,
destinationRef: destinationRef,
path: path,
ref: ref,
sourceIndex: sourceIndex,
archiveReader: archiveReader,
archiveWriter: archiveWriter,
}, nil
}
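(With the changes above, a docker-archive reference can now name an image either by tag or by a zero-based manifest index. A small sketch of both forms follows; the archive path and tag are illustrative.)

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/archive"
	"github.com/containers/image/v5/transports"
)

func main() {
	// Tagged form, as before.
	tagged, err := archive.ParseReference("/tmp/images.tar:busybox:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(transports.ImageName(tagged))

	// New source-index form: the second image in the archive's manifest.
	byIndex, err := archive.NewIndexReference("/tmp/images.tar", 1)
	if err != nil {
		panic(err)
	}
	fmt.Println(transports.ImageName(byIndex))
}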
@@ -99,17 +140,21 @@ func (ref archiveReference) Transport() types.ImageTransport {
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref archiveReference) StringWithinTransport() string {
if ref.destinationRef == nil {
switch {
case ref.ref != nil:
return fmt.Sprintf("%s:%s", ref.path, ref.ref.String())
case ref.sourceIndex != -1:
return fmt.Sprintf("%s:@%d", ref.path, ref.sourceIndex)
default:
return ref.path
}
return fmt.Sprintf("%s:%s", ref.path, ref.destinationRef.String())
}
// DockerReference returns a Docker reference associated with this reference
// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
func (ref archiveReference) DockerReference() reference.Named {
return ref.destinationRef
return ref.ref
}
// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.


@@ -0,0 +1,82 @@
package archive
import (
"io"
"os"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
// Writer manages a single in-progress Docker archive and allows adding images to it.
type Writer struct {
path string // The original, user-specified path; not the maintained temporary file, if any
archive *tarfile.Writer
writer io.Closer
}
// NewWriter returns a Writer for path.
// The caller should call .Close() on the returned object.
func NewWriter(sys *types.SystemContext, path string) (*Writer, error) {
fh, err := openArchiveForWriting(path)
if err != nil {
return nil, err
}
archive := tarfile.NewWriter(fh)
return &Writer{
path: path,
archive: archive,
writer: fh,
}, nil
}
// Close writes all outstanding data about images to the archive, and
// releases state associated with the Writer, if any.
// No more images can be added after this is called.
func (w *Writer) Close() error {
err := w.archive.Close()
if err2 := w.writer.Close(); err2 != nil && err == nil {
err = err2
}
return err
}
// NewReference returns an ImageReference that allows adding an image to Writer,
// with an optional reference.
func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) {
return newReference(w.path, destinationRef, -1, nil, w.archive)
}
// openArchiveForWriting opens path for writing a tar archive,
// making a few sanity checks.
func openArchiveForWriting(path string) (*os.File, error) {
// path can be either a pipe or a regular file
// in the case of a pipe, we require that we can open it for write
// in the case of a regular file, we don't want to overwrite any pre-existing file
// so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
// only in a different way. Either way, it's up to the user not to have two writers to the same path.)
fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return nil, errors.Wrapf(err, "error opening file %q", path)
}
succeeded := false
defer func() {
if !succeeded {
fh.Close()
}
}()
fhStat, err := fh.Stat()
if err != nil {
return nil, errors.Wrapf(err, "error statting file %q", path)
}
if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
return nil, errors.New("docker-archive doesn't support modifying existing images")
}
succeeded = true
return fh, nil
}
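(A minimal sketch of the new Writer API above: open an archive for writing and derive a per-image destination reference. The path and tag are illustrative, and the actual copy step is omitted since it is outside this diff.)

package main

import (
	"github.com/containers/image/v5/docker/archive"
	"github.com/containers/image/v5/docker/reference"
)

func main() {
	writer, err := archive.NewWriter(nil, "/tmp/out.tar") // nil SystemContext uses defaults
	if err != nil {
		panic(err)
	}
	defer writer.Close()

	named, err := reference.ParseNormalizedNamed("busybox:latest")
	if err != nil {
		panic(err)
	}
	tagged, ok := named.(reference.NamedTagged)
	if !ok {
		panic("not a tagged reference")
	}

	// destRef can be handed to the copy machinery as an image destination;
	// multiple references created from the same Writer share one archive.
	destRef, err := writer.NewReference(tagged)
	if err != nil {
		panic(err)
	}
	_ = destRef
}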


@@ -4,8 +4,8 @@ import (
"context"
"io"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/docker/tarfile"
"github.com/containers/image/v5/types"
"github.com/docker/docker/client"
"github.com/pkg/errors"
@@ -16,6 +16,7 @@ type daemonImageDestination struct {
ref daemonReference
mustMatchRuntimeOS bool
*tarfile.Destination // Implements most of types.ImageDestination
archive *tarfile.Writer
// For talking to imageLoadGoroutine
goroutineCancel context.CancelFunc
statusChannel <-chan error
@@ -45,6 +46,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
}
reader, writer := io.Pipe()
archive := tarfile.NewWriter(writer)
// Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it.
statusChannel := make(chan error, 1)
@@ -54,7 +56,8 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
return &daemonImageDestination{
ref: ref,
mustMatchRuntimeOS: mustMatchRuntimeOS,
Destination: tarfile.NewDestinationWithContext(sys, writer, namedTaggedRef),
Destination: tarfile.NewDestination(sys, archive, namedTaggedRef),
archive: archive,
goroutineCancel: goroutineCancel,
statusChannel: statusChannel,
writer: writer,
@@ -130,7 +133,7 @@ func (d *daemonImageDestination) Reference() types.ImageReference {
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *daemonImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
logrus.Debugf("docker-daemon: Closing tar stream")
if err := d.Destination.Commit(ctx); err != nil {
if err := d.archive.Close(); err != nil {
return err
}
if err := d.writer.Close(); err != nil {


@@ -3,7 +3,7 @@ package daemon
import (
"context"
"github.com/containers/image/v5/docker/tarfile"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
@@ -35,10 +35,11 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonRef
}
defer inputStream.Close()
src, err := tarfile.NewSourceFromStreamWithSystemContext(sys, inputStream)
archive, err := tarfile.NewReaderFromStream(sys, inputStream)
if err != nil {
return nil, err
}
src := tarfile.NewSource(archive, true, nil, -1)
return &daemonImageSource{
ref: ref,
Source: src,


@@ -331,7 +331,6 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
// Results holds the results returned by the /v1/search endpoint
Results []SearchResult `json:"results"`
}
v2Res := &V2Results{}
v1Res := &V1Results{}
// Get credentials from authfile for the underlying hostname
@@ -388,31 +387,55 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
}
logrus.Debugf("trying to talk to v2 search endpoint")
resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth, nil)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
} else {
searchRes := []SearchResult{}
path := "/v2/_catalog"
for len(searchRes) < limit {
resp, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, httpResponseToError(resp, ""))
} else {
if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
return nil, err
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
}
v2Res := &V2Results{}
if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
return nil, err
}
for _, repo := range v2Res.Repositories {
if len(searchRes) == limit {
break
}
searchRes := []SearchResult{}
for _, repo := range v2Res.Repositories {
if strings.Contains(repo, image) {
res := SearchResult{
Name: repo,
}
searchRes = append(searchRes, res)
if strings.Contains(repo, image) {
res := SearchResult{
Name: repo,
}
searchRes = append(searchRes, res)
}
return searchRes, nil
}
link := resp.Header.Get("Link")
if link == "" {
break
}
linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
linkURL, err := url.Parse(linkURLStr)
if err != nil {
return searchRes, err
}
// can be relative or absolute, but we only want the path (and I
// guess we're in trouble if it forwards to a new place...)
path = linkURL.Path
if linkURL.RawQuery != "" {
path += "?"
path += linkURL.RawQuery
}
}
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
return searchRes, nil
}
// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.


@@ -22,7 +22,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/docker/distribution/registry/api/errcode"
v2 "github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/client"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
@@ -154,7 +153,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
defer res.Body.Close()
if res.StatusCode != http.StatusAccepted {
logrus.Debugf("Error initiating layer upload, response %#v", *res)
return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry)
return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry)
}
uploadLocation, err := res.Location()
if err != nil {
@@ -175,7 +174,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}
defer res.Body.Close()
if !successStatus(res.StatusCode) {
return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer chunked")
return nil, errors.Wrapf(registryHTTPResponseToError(res), "Error uploading layer chunked")
}
uploadLocation, err := res.Location()
if err != nil {
@@ -201,7 +200,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
logrus.Debugf("Error uploading layer, response %#v", *res)
return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer to %s", uploadLocation)
return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "Error uploading layer to %s", uploadLocation)
}
logrus.Debugf("Upload of layer %s complete", computedDigest)
@@ -226,7 +225,7 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
return false, -1, errors.Wrapf(registryHTTPResponseToError(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil
@@ -277,7 +276,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
default:
logrus.Debugf("Error mounting, response %#v", *res)
return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
return errors.Wrapf(registryHTTPResponseToError(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
}
}
@@ -414,7 +413,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
}
defer res.Body.Close()
if !successStatus(res.StatusCode) {
err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
err = errors.Wrapf(registryHTTPResponseToError(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
if isManifestInvalidError(errors.Cause(err)) {
err = types.ManifestTypeRejectedError{Err: err}
}
@@ -641,7 +640,7 @@ sigExists:
logrus.Debugf("Error body %s", string(body))
}
logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
return errors.Wrapf(client.HandleErrorResponse(res), "Error uploading signature to %s in %s", path, d.c.registry)
return errors.Wrapf(registryHTTPResponseToError(res), "Error uploading signature to %s in %s", path, d.c.registry)
}
}


@@ -17,7 +17,6 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/docker/distribution/registry/client"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -193,7 +192,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type"))
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
return nil, "", errors.Wrapf(registryHTTPResponseToError(res), "Error reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
}
manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
@@ -235,6 +234,9 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
resp *http.Response
err error
)
if len(urls) == 0 {
return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
}
for _, url := range urls {
resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
if err == nil {


@@ -44,3 +44,17 @@ func httpResponseToError(res *http.Response, context string) error {
return perrors.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
}
}
// registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution
// registry
func registryHTTPResponseToError(res *http.Response) error {
errResponse := client.HandleErrorResponse(res)
if e, ok := perrors.Cause(errResponse).(*client.UnexpectedHTTPResponseError); ok {
response := string(e.Response)
if len(response) > 50 {
response = response[:50] + "..."
}
errResponse = fmt.Errorf("StatusCode: %d, %s", e.StatusCode, response)
}
return errResponse
}


@@ -0,0 +1,217 @@
package tarfile
import (
"bytes"
"context"
"encoding/json"
"io"
"io/ioutil"
"os"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
type Destination struct {
archive *Writer
repoTags []reference.NamedTagged
// Other state.
config []byte
sysCtx *types.SystemContext
}
// NewDestination returns a tarfile.Destination adding images to the specified Writer.
func NewDestination(sys *types.SystemContext, archive *Writer, ref reference.NamedTagged) *Destination {
repoTags := []reference.NamedTagged{}
if ref != nil {
repoTags = append(repoTags, ref)
}
return &Destination{
archive: archive,
repoTags: repoTags,
sysCtx: sys,
}
}
// AddRepoTags adds the specified tags to the destination's repoTags.
func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
d.repoTags = append(d.repoTags, tags...)
}
// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
// If an empty slice or nil is returned, then any MIME type can be tried for upload.
func (d *Destination) SupportedManifestMIMETypes() []string {
return []string{
manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
}
}
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *Destination) SupportsSignatures(ctx context.Context) error {
return errors.Errorf("Storing signatures for docker tar files is not supported")
}
// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
// uploaded to the image destination, true otherwise.
func (d *Destination) AcceptsForeignLayerURLs() bool {
return false
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *Destination) MustMatchRuntimeOS() bool {
return false
}
// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *Destination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
}
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *Destination) HasThreadSafePutBlob() bool {
// The code _is_ actually thread-safe, but apart from computing sizes/digests of layers where
// this is unknown in advance, the actual copy is serialized by d.archive, so there probably isn't
// much benefit from concurrency, mostly just extra CPU, memory and I/O contention.
return false
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
// Ouch, we need to stream the blob into a temporary file just to determine the size.
// When the layer is decompressed, we also have to generate the digest on the uncompressed data.
if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob")
if err != nil {
return types.BlobInfo{}, err
}
defer os.Remove(streamCopy.Name())
defer streamCopy.Close()
digester := digest.Canonical.Digester()
tee := io.TeeReader(stream, digester.Hash())
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
size, err := io.Copy(streamCopy, tee)
if err != nil {
return types.BlobInfo{}, err
}
_, err = streamCopy.Seek(0, io.SeekStart)
if err != nil {
return types.BlobInfo{}, err
}
inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
if inputInfo.Digest == "" {
inputInfo.Digest = digester.Digest()
}
stream = streamCopy
logrus.Debugf("... streaming done")
}
if err := d.archive.lock(); err != nil {
return types.BlobInfo{}, err
}
defer d.archive.unlock()
// Maybe the blob has been already sent
ok, reusedInfo, err := d.archive.tryReusingBlobLocked(inputInfo)
if err != nil {
return types.BlobInfo{}, err
}
if ok {
return reusedInfo, nil
}
if isConfig {
buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream")
}
d.config = buf
if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error writing Config file")
}
} else {
if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
return types.BlobInfo{}, err
}
}
d.archive.recordBlobLocked(types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size})
return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if err := d.archive.lock(); err != nil {
return false, types.BlobInfo{}, err
}
defer d.archive.unlock()
return d.archive.tryReusingBlobLocked(info)
}
// PutManifest writes manifest to the destination.
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
// there can be no secondary manifests.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
// and may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
if instanceDigest != nil {
return errors.New(`Manifest lists are not supported for docker tar files`)
}
// We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
// so the caller trying a different manifest kind would be pointless.
var man manifest.Schema2
if err := json.Unmarshal(m, &man); err != nil {
return errors.Wrap(err, "Error parsing manifest")
}
if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
}
if err := d.archive.lock(); err != nil {
return err
}
defer d.archive.unlock()
if err := d.archive.writeLegacyMetadataLocked(man.LayersDescriptors, d.config, d.repoTags); err != nil {
return err
}
return d.archive.ensureManifestItemLocked(man.LayersDescriptors, man.ConfigDescriptor.Digest, d.repoTags)
}
// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
if instanceDigest != nil {
return errors.Errorf(`Manifest lists are not supported for docker tar files`)
}
if len(signatures) != 0 {
return errors.Errorf("Storing signatures for docker tar files is not supported")
}
return nil
}
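Compared with the removed destination further down, this Destination no longer owns a tar.Writer; it shares a tarfile Writer through the archive field and takes the Writer's lock around every blob and manifest write, which is what allows several images to be appended to one (docker save)-style archive. A minimal in-package sketch of the intended wiring, with the per-image copy steps elided (hypothetical helper, not part of the patch):

func writeOneImageSkeleton(out io.Writer, ref reference.NamedTagged) error {
	w := NewWriter(out) // one Writer per archive
	dest := NewDestination(nil, w, ref)
	_ = dest // PutBlob / PutManifest calls for the image's layers, config and manifest go here.
	// Close() is what finally emits manifest.json and the legacy `repositories` file.
	return w.Close()
}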

View File

@@ -0,0 +1,269 @@
package tarfile
import (
"archive/tar"
"encoding/json"
"io"
"io/ioutil"
"os"
"path"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
// Reader is a ((docker save)-formatted) tar archive that allows random access to any component.
type Reader struct {
// None of the fields below are modified after the archive is created, until .Close();
// this allows concurrent readers of the same archive.
path string // "" if the archive has already been closed.
removeOnClose bool // Remove file on close if true
Manifest []ManifestItem // Guaranteed to exist after the archive is created.
}
// NewReaderFromFile returns a Reader for the specified path.
// The caller should call .Close() on the returned archive when done.
func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
file, err := os.Open(path)
if err != nil {
return nil, errors.Wrapf(err, "error opening file %q", path)
}
defer file.Close()
// If the file is not compressed, we can just return the file itself
// as a source. Otherwise we pass the stream to NewReaderFromStream.
stream, isCompressed, err := compression.AutoDecompress(file)
if err != nil {
return nil, errors.Wrapf(err, "Error detecting compression for file %q", path)
}
defer stream.Close()
if !isCompressed {
return newReader(path, false)
}
return NewReaderFromStream(sys, stream)
}
// NewReaderFromStream returns a Reader for the specified inputStream,
// which can be either compressed or uncompressed. The caller can close the
// inputStream immediately after NewReaderFromStream returns.
// The caller should call .Close() on the returned archive when done.
func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
// Save inputStream to a temporary file
tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
if err != nil {
return nil, errors.Wrap(err, "error creating temporary file")
}
defer tarCopyFile.Close()
succeeded := false
defer func() {
if !succeeded {
os.Remove(tarCopyFile.Name())
}
}()
// In order to be compatible with docker-load, we need to support
// auto-decompression (it's also a nice quality-of-life thing to avoid
// giving users really confusing "invalid tar header" errors).
uncompressedStream, _, err := compression.AutoDecompress(inputStream)
if err != nil {
return nil, errors.Wrap(err, "Error auto-decompressing input")
}
defer uncompressedStream.Close()
// Copy the plain archive to the temporary file.
//
// TODO: This can take quite some time, and should ideally be cancellable
// using a context.Context.
if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name())
}
succeeded = true
return newReader(tarCopyFile.Name(), true)
}
// newReader creates a Reader for the specified path and removeOnClose flag.
// The caller should call .Close() on the returned archive when done.
func newReader(path string, removeOnClose bool) (*Reader, error) {
// This is a valid enough archive, except Manifest is not yet filled.
r := Reader{
path: path,
removeOnClose: removeOnClose,
}
succeeded := false
defer func() {
if !succeeded {
r.Close()
}
}()
// We initialize Manifest immediately when constructing the Reader instead
// of later on-demand because every caller will need the data, and because doing it now
// removes the need to synchronize the access/creation of the data if the archive is later
// used from multiple goroutines to access different images.
// FIXME? Do we need to deal with the legacy format?
bytes, err := r.readTarComponent(manifestFileName, iolimits.MaxTarFileManifestSize)
if err != nil {
return nil, err
}
if err := json.Unmarshal(bytes, &r.Manifest); err != nil {
return nil, errors.Wrap(err, "Error decoding tar manifest.json")
}
succeeded = true
return &r, nil
}
// Close removes resources associated with an initialized Reader, if any.
func (r *Reader) Close() error {
path := r.path
r.path = "" // Mark the archive as closed
if r.removeOnClose {
return os.Remove(path)
}
return nil
}
// ChooseManifestItem selects a manifest item from r.Manifest matching (ref, sourceIndex), one or
// both of which should be (nil, -1).
// On success, it returns the manifest item and an index of the matching tag, if a tag was used
// for matching; the index is -1 if a tag was not used.
func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int) (*ManifestItem, int, error) {
switch {
case ref != nil && sourceIndex != -1:
return nil, -1, errors.Errorf("Internal error: Cannot have both ref %s and source index @%d",
ref.String(), sourceIndex)
case ref != nil:
refString := ref.String()
for i := range r.Manifest {
for tagIndex, tag := range r.Manifest[i].RepoTags {
parsedTag, err := reference.ParseNormalizedNamed(tag)
if err != nil {
return nil, -1, errors.Wrapf(err, "Invalid tag %#v in manifest.json item @%d", tag, i)
}
if parsedTag.String() == refString {
return &r.Manifest[i], tagIndex, nil
}
}
}
return nil, -1, errors.Errorf("Tag %#v not found", refString)
case sourceIndex != -1:
if sourceIndex >= len(r.Manifest) {
return nil, -1, errors.Errorf("Invalid source index @%d, only %d manifest items available",
sourceIndex, len(r.Manifest))
}
return &r.Manifest[sourceIndex], -1, nil
default:
if len(r.Manifest) != 1 {
return nil, -1, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest))
}
return &r.Manifest[0], -1, nil
}
}
// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
type tarReadCloser struct {
*tar.Reader
backingFile *os.File
}
func (t *tarReadCloser) Close() error {
return t.backingFile.Close()
}
// openTarComponent returns a ReadCloser for the specific file within the archive.
// This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers),
// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
// It is safe to call this method from multiple goroutines simultaneously.
// The caller should call .Close() on the returned stream.
func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) {
// This is only a sanity check; if anyone did concurrently close the Reader, this access is technically
// racy against the write in .Close().
if r.path == "" {
return nil, errors.New("Internal error: trying to read an already closed tarfile.Reader")
}
f, err := os.Open(r.path)
if err != nil {
return nil, err
}
succeeded := false
defer func() {
if !succeeded {
f.Close()
}
}()
tarReader, header, err := findTarComponent(f, componentPath)
if err != nil {
return nil, err
}
if header == nil {
return nil, os.ErrNotExist
}
if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
// We follow only one symlink; so no loops are possible.
if _, err := f.Seek(0, io.SeekStart); err != nil {
return nil, err
}
// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
// so we don't care.
tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
if err != nil {
return nil, err
}
if header == nil {
return nil, os.ErrNotExist
}
}
if !header.FileInfo().Mode().IsRegular() {
return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
}
succeeded = true
return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
}
// findTarComponent returns a header and a reader matching componentPath within inputFile,
// or (nil, nil, nil) if not found.
func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *tar.Header, error) {
t := tar.NewReader(inputFile)
componentPath = path.Clean(componentPath)
for {
h, err := t.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, nil, err
}
if path.Clean(h.Name) == componentPath {
return t, h, nil
}
}
return nil, nil, nil
}
// readTarComponent returns full contents of componentPath.
// It is safe to call this method from multiple goroutines simultaneously.
func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) {
file, err := r.openTarComponent(path)
if err != nil {
return nil, errors.Wrapf(err, "Error loading tar component %s", path)
}
defer file.Close()
bytes, err := iolimits.ReadAtMost(file, limit)
if err != nil {
return nil, err
}
return bytes, nil
}
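Because newReader parses manifest.json eagerly, a Reader can be shared by multiple goroutines that each resolve their own image via ChooseManifestItem and then pull components with openTarComponent. A small in-package sketch of the tagged-image case, assuming the archive was produced by (docker save); the tag name is hypothetical:

func pickTaggedImageSkeleton(archivePath string) (*ManifestItem, error) {
	r, err := NewReaderFromFile(nil, archivePath)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	named, err := reference.ParseNormalizedNamed("docker.io/library/busybox:latest")
	if err != nil {
		return nil, err
	}
	// The parsed reference carries an explicit tag, so the NamedTagged assertion holds.
	item, _, err := r.ChooseManifestItem(named.(reference.NamedTagged), -1)
	return item, err
}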

View File

@@ -11,8 +11,8 @@ import (
"path"
"sync"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/types"
@@ -22,8 +22,11 @@ import (
// Source is a partial implementation of types.ImageSource for reading from tarPath.
type Source struct {
tarPath string
removeTarPathOnClose bool // Remove temp file on close if true
archive *Reader
closeArchive bool // .Close() the archive when the source is closed.
// If ref is nil and sourceIndex is -1, indicates the only image in the archive.
ref reference.NamedTagged // May be nil
sourceIndex int // May be -1
// The following data is only available after ensureCachedDataIsPresent() succeeds
tarManifest *ManifestItem // nil if not available yet.
configBytes []byte
@@ -41,180 +44,16 @@ type layerInfo struct {
size int64
}
// TODO: We could add support for multiple images in a single archive, so
// that people could use docker-archive:opensuse.tar:opensuse:leap as
// the source of an image.
// To do for both the NewSourceFromFile and NewSourceFromStream functions
// NewSourceFromFile returns a tarfile.Source for the specified path.
// Deprecated: Please use NewSourceFromFileWithContext, which allows you to configure the temp directory
// for big files through SystemContext.BigFilesTemporaryDir.
func NewSourceFromFile(path string) (*Source, error) {
return NewSourceFromFileWithContext(nil, path)
}
// NewSourceFromFileWithContext returns a tarfile.Source for the specified path.
func NewSourceFromFileWithContext(sys *types.SystemContext, path string) (*Source, error) {
file, err := os.Open(path)
if err != nil {
return nil, errors.Wrapf(err, "error opening file %q", path)
}
defer file.Close()
// If the file is already not compressed we can just return the file itself
// as a source. Otherwise we pass the stream to NewSourceFromStream.
stream, isCompressed, err := compression.AutoDecompress(file)
if err != nil {
return nil, errors.Wrapf(err, "Error detecting compression for file %q", path)
}
defer stream.Close()
if !isCompressed {
return &Source{
tarPath: path,
}, nil
}
return NewSourceFromStreamWithSystemContext(sys, stream)
}
// NewSourceFromStream returns a tarfile.Source for the specified inputStream,
// which can be either compressed or uncompressed. The caller can close the
// inputStream immediately after NewSourceFromFile returns.
// Deprecated: Please use NewSourceFromStreamWithSystemContext, which allows you to configure
// the temp directory for big files through SystemContext.BigFilesTemporaryDir.
func NewSourceFromStream(inputStream io.Reader) (*Source, error) {
return NewSourceFromStreamWithSystemContext(nil, inputStream)
}
// NewSourceFromStreamWithSystemContext returns a tarfile.Source for the specified inputStream,
// which can be either compressed or uncompressed. The caller can close the
// inputStream immediately after NewSourceFromFile returns.
func NewSourceFromStreamWithSystemContext(sys *types.SystemContext, inputStream io.Reader) (*Source, error) {
// FIXME: use SystemContext here.
// Save inputStream to a temporary file
tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
if err != nil {
return nil, errors.Wrap(err, "error creating temporary file")
}
defer tarCopyFile.Close()
succeeded := false
defer func() {
if !succeeded {
os.Remove(tarCopyFile.Name())
}
}()
// In order to be compatible with docker-load, we need to support
// auto-decompression (it's also a nice quality-of-life thing to avoid
// giving users really confusing "invalid tar header" errors).
uncompressedStream, _, err := compression.AutoDecompress(inputStream)
if err != nil {
return nil, errors.Wrap(err, "Error auto-decompressing input")
}
defer uncompressedStream.Close()
// Copy the plain archive to the temporary file.
//
// TODO: This can take quite some time, and should ideally be cancellable
// using a context.Context.
if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name())
}
succeeded = true
// NewSource returns a tarfile.Source for an image in the specified archive matching ref
// and sourceIndex (or the only image if they are (nil, -1)).
// The archive will be closed if closeArchive is true.
func NewSource(archive *Reader, closeArchive bool, ref reference.NamedTagged, sourceIndex int) *Source {
return &Source{
tarPath: tarCopyFile.Name(),
removeTarPathOnClose: true,
}, nil
}
// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
type tarReadCloser struct {
*tar.Reader
backingFile *os.File
}
func (t *tarReadCloser) Close() error {
return t.backingFile.Close()
}
// openTarComponent returns a ReadCloser for the specific file within the archive.
// This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers),
// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
// The caller should call .Close() on the returned stream.
func (s *Source) openTarComponent(componentPath string) (io.ReadCloser, error) {
f, err := os.Open(s.tarPath)
if err != nil {
return nil, err
archive: archive,
closeArchive: closeArchive,
ref: ref,
sourceIndex: sourceIndex,
}
succeeded := false
defer func() {
if !succeeded {
f.Close()
}
}()
tarReader, header, err := findTarComponent(f, componentPath)
if err != nil {
return nil, err
}
if header == nil {
return nil, os.ErrNotExist
}
if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
// We follow only one symlink; so no loops are possible.
if _, err := f.Seek(0, io.SeekStart); err != nil {
return nil, err
}
// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
// so we don't care.
tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
if err != nil {
return nil, err
}
if header == nil {
return nil, os.ErrNotExist
}
}
if !header.FileInfo().Mode().IsRegular() {
return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
}
succeeded = true
return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
}
// findTarComponent returns a header and a reader matching path within inputFile,
// or (nil, nil, nil) if not found.
func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) {
t := tar.NewReader(inputFile)
for {
h, err := t.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, nil, err
}
if h.Name == path {
return t, h, nil
}
}
return nil, nil, nil
}
// readTarComponent returns full contents of componentPath.
func (s *Source) readTarComponent(path string, limit int) ([]byte, error) {
file, err := s.openTarComponent(path)
if err != nil {
return nil, errors.Wrapf(err, "Error loading tar component %s", path)
}
defer file.Close()
bytes, err := iolimits.ReadAtMost(file, limit)
if err != nil {
return nil, err
}
return bytes, nil
}
// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
@@ -229,37 +68,31 @@ func (s *Source) ensureCachedDataIsPresent() error {
// ensureCachedDataIsPresentPrivate is a private implementation detail of ensureCachedDataIsPresent.
// Call ensureCachedDataIsPresent instead.
func (s *Source) ensureCachedDataIsPresentPrivate() error {
// Read and parse manifest.json
tarManifest, err := s.loadTarManifest()
tarManifest, _, err := s.archive.ChooseManifestItem(s.ref, s.sourceIndex)
if err != nil {
return err
}
// Check to make sure length is 1
if len(tarManifest) != 1 {
return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
}
// Read and parse config.
configBytes, err := s.readTarComponent(tarManifest[0].Config, iolimits.MaxConfigBodySize)
configBytes, err := s.archive.readTarComponent(tarManifest.Config, iolimits.MaxConfigBodySize)
if err != nil {
return err
}
var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
return errors.Wrapf(err, "Error decoding tar config %s", tarManifest.Config)
}
if parsedConfig.RootFS == nil {
return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest[0].Config)
return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)
}
knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
if err != nil {
return err
}
// Success; commit.
s.tarManifest = &tarManifest[0]
s.tarManifest = tarManifest
s.configBytes = configBytes
s.configDigest = digest.FromBytes(configBytes)
s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
@@ -267,31 +100,17 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error {
return nil
}
// loadTarManifest loads and decodes the manifest.json.
func (s *Source) loadTarManifest() ([]ManifestItem, error) {
// FIXME? Do we need to deal with the legacy format?
bytes, err := s.readTarComponent(manifestFileName, iolimits.MaxTarFileManifestSize)
if err != nil {
return nil, err
}
var items []ManifestItem
if err := json.Unmarshal(bytes, &items); err != nil {
return nil, errors.Wrap(err, "Error decoding tar manifest.json")
}
return items, nil
}
// Close removes resources associated with an initialized Source, if any.
func (s *Source) Close() error {
if s.removeTarPathOnClose {
return os.Remove(s.tarPath)
if s.closeArchive {
return s.archive.Close()
}
return nil
}
// LoadTarManifest loads and decodes the manifest.json
func (s *Source) LoadTarManifest() ([]ManifestItem, error) {
return s.loadTarManifest()
// TarManifest returns contents of manifest.json
func (s *Source) TarManifest() []ManifestItem {
return s.archive.Manifest
}
func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) {
@@ -308,7 +127,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
// which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original.
continue
}
layerPath := tarManifest.Layers[i]
layerPath := path.Clean(tarManifest.Layers[i])
if _, ok := unknownLayerSizes[layerPath]; ok {
return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
}
@@ -321,7 +140,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
}
// Scan the tar file to collect layer sizes.
file, err := os.Open(s.tarPath)
file, err := os.Open(s.archive.path)
if err != nil {
return nil, err
}
@@ -335,7 +154,9 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
if err != nil {
return nil, err
}
if li, ok := unknownLayerSizes[h.Name]; ok {
layerPath := path.Clean(h.Name)
// FIXME: Cache this data across images in Reader.
if li, ok := unknownLayerSizes[layerPath]; ok {
// Since GetBlob will decompress layers that are compressed we need
// to do the decompression here as well, otherwise we will
// incorrectly report the size. Pretty critical, since tools like
@@ -343,7 +164,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
// the slower method of checking if it's compressed.
uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
if err != nil {
return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", h.Name)
return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", layerPath)
}
defer uncompressedStream.Close()
@@ -351,11 +172,11 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
if isCompressed {
uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream)
if err != nil {
return nil, errors.Wrapf(err, "Error reading %s to find its size", h.Name)
return nil, errors.Wrapf(err, "Error reading %s to find its size", layerPath)
}
}
li.size = uncompressedSize
delete(unknownLayerSizes, h.Name)
delete(unknownLayerSizes, layerPath)
}
}
if len(unknownLayerSizes) != 0 {
@@ -446,7 +267,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
}
if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
underlyingStream, err := s.openTarComponent(li.path)
underlyingStream, err := s.archive.openTarComponent(li.path)
if err != nil {
return nil, 0, err
}
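After this refactor a Source is just a view over a shared Reader plus a (ref, sourceIndex) selector; the temporary-file handling and tar scanning it used to carry now live in the Reader shown earlier. A minimal in-package sketch of building a Source for the only image in an archive and handing the Reader's lifetime to it (hypothetical helper, not part of the patch):

func sourceForOnlyImageSkeleton(archivePath string) (*Source, error) {
	r, err := NewReaderFromFile(nil, archivePath)
	if err != nil {
		return nil, err
	}
	// closeArchive=true: closing the Source also closes (and removes, if temporary) the Reader.
	return NewSource(r, true, nil, -1), nil
}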

View File

@@ -17,7 +17,7 @@ const (
)
// ManifestItem is an element of the array stored in the top-level manifest.json file.
type ManifestItem struct {
type ManifestItem struct { // NOTE: This is visible as docker/tarfile.ManifestItem, and a part of the stable API.
Config string
RepoTags []string
Layers []string
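ManifestItem mirrors one element of the manifest.json array at the root of a (docker save) archive, so decoding that file is a plain JSON unmarshal into a slice. A small in-package sketch, assuming encoding/json is available (hypothetical helper):

func decodeManifestSkeleton(data []byte) ([]ManifestItem, error) {
	var items []ManifestItem
	if err := json.Unmarshal(data, &items); err != nil {
		return nil, err
	}
	return items, nil
}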

View File

@@ -0,0 +1,381 @@
package tarfile
import (
"archive/tar"
"bytes"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"sync"
"time"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// Writer allows creating a (docker save)-formatted tar archive containing one or more images.
type Writer struct {
mutex sync.Mutex
// ALL of the following members can only be accessed with the mutex held.
// Use Writer.lock() to obtain the mutex.
writer io.Writer
tar *tar.Writer // nil if the Writer has already been closed.
// Other state.
blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
repositories map[string]map[string]string
legacyLayers map[string]struct{} // A set of IDs of legacy layers that have been already sent.
manifest []ManifestItem
manifestByConfig map[digest.Digest]int // A map from config digest to an entry index in manifest above.
}
// NewWriter returns a Writer for the specified io.Writer.
// The caller must eventually call .Close() on the returned object to create a valid archive.
func NewWriter(dest io.Writer) *Writer {
return &Writer{
writer: dest,
tar: tar.NewWriter(dest),
blobs: make(map[digest.Digest]types.BlobInfo),
repositories: map[string]map[string]string{},
legacyLayers: map[string]struct{}{},
manifestByConfig: map[digest.Digest]int{},
}
}
// lock does some sanity checks and locks the Writer.
// If this function succeeds, the caller must call w.unlock.
// Do not use Writer.mutex directly.
func (w *Writer) lock() error {
w.mutex.Lock()
if w.tar == nil {
w.mutex.Unlock()
return errors.New("Internal error: trying to use an already closed tarfile.Writer")
}
return nil
}
// unlock releases the lock obtained by Writer.lock
// Do not use Writer.mutex directly.
func (w *Writer) unlock() {
w.mutex.Unlock()
}
// tryReusingBlobLocked checks whether the transport already contains a blob, and if so, returns its metadata.
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, tryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// The caller must have locked the Writer.
func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) {
if info.Digest == "" {
return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
}
if blob, ok := w.blobs[info.Digest]; ok {
return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
}
return false, types.BlobInfo{}, nil
}
// recordBlobLocked records metadata of a blob, which must contain at least a digest and size.
// The caller must have locked the Writer.
func (w *Writer) recordBlobLocked(info types.BlobInfo) {
w.blobs[info.Digest] = info
}
// ensureSingleLegacyLayerLocked writes legacy VERSION and configuration files for a single layer
// The caller must have locked the Writer.
func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest digest.Digest, configBytes []byte) error {
if _, ok := w.legacyLayers[layerID]; !ok {
// Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
// See also the comment in physicalLayerPath.
physicalLayerPath := w.physicalLayerPath(layerDigest)
if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
return errors.Wrap(err, "Error creating layer symbolic link")
}
b := []byte("1.0")
if err := w.sendBytesLocked(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
return errors.Wrap(err, "Error writing VERSION file")
}
if err := w.sendBytesLocked(filepath.Join(layerID, legacyConfigFileName), configBytes); err != nil {
return errors.Wrap(err, "Error writing config json file")
}
w.legacyLayers[layerID] = struct{}{}
}
return nil
}
// writeLegacyMetadataLocked writes legacy layer metadata and records tags for a single image.
func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2Descriptor, configBytes []byte, repoTags []reference.NamedTagged) error {
var chainID digest.Digest
lastLayerID := ""
for i, l := range layerDescriptors {
// The legacy format requires a config file per layer
layerConfig := make(map[string]interface{})
// The root layer doesn't have any parent
if lastLayerID != "" {
layerConfig["parent"] = lastLayerID
}
// The top layer configuration file is generated from a subset of the image configuration.
if i == len(layerDescriptors)-1 {
var config map[string]*json.RawMessage
err := json.Unmarshal(configBytes, &config)
if err != nil {
return errors.Wrap(err, "Error unmarshaling config")
}
for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
layerConfig[attr] = config[attr]
}
}
// This chainID value matches the computation in docker/docker/layer.CreateChainID …
if chainID == "" {
chainID = l.Digest
} else {
chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String())
}
// … but note that the image ID does not _exactly_ match docker/docker/image/v1.CreateID, primarily because
// we create the image configs differently in detail. At least recent versions allocate new IDs on load,
// so this is fine as long as the IDs we use are unique / cannot loop.
//
// For intermediate images, we could just use the chainID as an image ID, but using a digest of ~the created
// config makes sure that everything uses the same “namespace”; a bit less efficient but clearer.
//
// Temporarily add the chainID to the config, only for the purpose of generating the image ID.
layerConfig["layer_id"] = chainID
b, err := json.Marshal(layerConfig) // Note that layerConfig["id"] is not set yet at this point.
if err != nil {
return errors.Wrap(err, "Error marshaling layer config")
}
delete(layerConfig, "layer_id")
layerID := digest.Canonical.FromBytes(b).Hex()
layerConfig["id"] = layerID
configBytes, err := json.Marshal(layerConfig)
if err != nil {
return errors.Wrap(err, "Error marshaling layer config")
}
if err := w.ensureSingleLegacyLayerLocked(layerID, l.Digest, configBytes); err != nil {
return err
}
lastLayerID = layerID
}
if lastLayerID != "" {
for _, repoTag := range repoTags {
if val, ok := w.repositories[repoTag.Name()]; ok {
val[repoTag.Tag()] = lastLayerID
} else {
w.repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): lastLayerID}
}
}
}
return nil
}
// checkManifestItemsMatch checks that a and b describe the same image,
// and returns an error if that's not the case (which should never happen).
func checkManifestItemsMatch(a, b *ManifestItem) error {
if a.Config != b.Config {
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with configs %#v vs. %#v", a.Config, b.Config)
}
if len(a.Layers) != len(b.Layers) {
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers %#v vs. %#v", a.Layers, b.Layers)
}
for i := range a.Layers {
if a.Layers[i] != b.Layers[i] {
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers[i] %#v vs. %#v", a.Layers[i], b.Layers[i])
}
}
// Ignore RepoTags, that will be built later.
// Ignore Parent and LayerSources, which we don't set to anything meaningful.
return nil
}
// ensureManifestItemLocked ensures that there is a manifest item pointing to (layerDescriptors, configDigest) with repoTags
// The caller must have locked the Writer.
func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Descriptor, configDigest digest.Digest, repoTags []reference.NamedTagged) error {
layerPaths := []string{}
for _, l := range layerDescriptors {
layerPaths = append(layerPaths, w.physicalLayerPath(l.Digest))
}
var item *ManifestItem
newItem := ManifestItem{
Config: w.configPath(configDigest),
RepoTags: []string{},
Layers: layerPaths,
Parent: "", // We dont have this information
LayerSources: nil,
}
if i, ok := w.manifestByConfig[configDigest]; ok {
item = &w.manifest[i]
if err := checkManifestItemsMatch(item, &newItem); err != nil {
return err
}
} else {
i := len(w.manifest)
w.manifestByConfig[configDigest] = i
w.manifest = append(w.manifest, newItem)
item = &w.manifest[i]
}
knownRepoTags := map[string]struct{}{}
for _, repoTag := range item.RepoTags {
knownRepoTags[repoTag] = struct{}{}
}
for _, tag := range repoTags {
// For github.com/docker/docker consumers, this works just as well as
// refString := ref.String()
// because when reading the RepoTags strings, github.com/docker/docker/reference
// normalizes both of them to the same value.
//
// Doing it this way to include the normalized-out `docker.io[/library]` does make
// a difference for github.com/projectatomic/docker consumers, with the
// “Add --add-registry and --block-registry options to docker daemon” patch.
// These consumers treat reference strings which include a hostname and reference
// strings without a hostname differently.
//
// Using the host name here is more explicit about the intent, and it has the same
// effect as (docker pull) in projectatomic/docker, which tags the result using
// a hostname-qualified reference.
// See https://github.com/containers/image/issues/72 for a more detailed
// analysis and explanation.
refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag())
if _, ok := knownRepoTags[refString]; !ok {
item.RepoTags = append(item.RepoTags, refString)
knownRepoTags[refString] = struct{}{}
}
}
return nil
}
// Close writes all outstanding data about images to the archive, and finishes writing data
// to the underlying io.Writer.
// No more images can be added after this is called.
func (w *Writer) Close() error {
if err := w.lock(); err != nil {
return err
}
defer w.unlock()
b, err := json.Marshal(&w.manifest)
if err != nil {
return err
}
if err := w.sendBytesLocked(manifestFileName, b); err != nil {
return err
}
b, err = json.Marshal(w.repositories)
if err != nil {
return errors.Wrap(err, "Error marshaling repositories")
}
if err := w.sendBytesLocked(legacyRepositoriesFileName, b); err != nil {
return errors.Wrap(err, "Error writing config json file")
}
if err := w.tar.Close(); err != nil {
return err
}
w.tar = nil // Mark the Writer as closed.
return nil
}
// configPath returns a path we choose for storing a config with the specified digest.
// NOTE: This is an internal implementation detail, not a format property, and can change
// any time.
func (w *Writer) configPath(configDigest digest.Digest) string {
return configDigest.Hex() + ".json"
}
// physicalLayerPath returns a path we choose for storing a layer with the specified digest
// (the actual path, i.e. a regular file, not a symlink that may be used in the legacy format).
// NOTE: This is an internal implementation detail, not a format property, and can change
// any time.
func (w *Writer) physicalLayerPath(layerDigest digest.Digest) string {
// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
// writeLegacyMetadataLocked constructs layer IDs differently from inputInfo.Digest values (as described
// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
// in the root of the tarball.
return layerDigest.Hex() + ".tar"
}
type tarFI struct {
path string
size int64
isSymlink bool
}
func (t *tarFI) Name() string {
return t.path
}
func (t *tarFI) Size() int64 {
return t.size
}
func (t *tarFI) Mode() os.FileMode {
if t.isSymlink {
return os.ModeSymlink
}
return 0444
}
func (t *tarFI) ModTime() time.Time {
return time.Unix(0, 0)
}
func (t *tarFI) IsDir() bool {
return false
}
func (t *tarFI) Sys() interface{} {
return nil
}
// sendSymlinkLocked sends a symlink into the tar stream.
// The caller must have locked the Writer.
func (w *Writer) sendSymlinkLocked(path string, target string) error {
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
if err != nil {
return err
}
logrus.Debugf("Sending as tar link %s -> %s", path, target)
return w.tar.WriteHeader(hdr)
}
// sendBytesLocked sends a path into the tar stream.
// The caller must have locked the Writer.
func (w *Writer) sendBytesLocked(path string, b []byte) error {
return w.sendFileLocked(path, int64(len(b)), bytes.NewReader(b))
}
// sendFileLocked sends a file into the tar stream.
// The caller must have locked the Writer.
func (w *Writer) sendFileLocked(path string, expectedSize int64, stream io.Reader) error {
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
if err != nil {
return err
}
logrus.Debugf("Sending as tar file %s", path)
if err := w.tar.WriteHeader(hdr); err != nil {
return err
}
// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
size, err := io.Copy(w.tar, stream)
if err != nil {
return err
}
if size != expectedSize {
return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
}
return nil
}
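The legacy layer IDs produced by writeLegacyMetadataLocked hang off a running chain ID that folds each layer digest into the previous one, matching docker/docker/layer.CreateChainID. A standalone sketch of just that folding step (hypothetical helper, not part of the patch):

func chainIDSkeleton(layerDigests []digest.Digest) digest.Digest {
	var chainID digest.Digest
	for _, d := range layerDigests {
		if chainID == "" {
			chainID = d // the first layer's chain ID is its own digest
		} else {
			chainID = digest.Canonical.FromString(chainID.String() + " " + d.String())
		}
	}
	return chainID
}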

View File

@@ -11,6 +11,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
"github.com/ghodss/yaml"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@@ -26,6 +27,9 @@ var systemRegistriesDirPath = builtinRegistriesDirPath
// DO NOT change this, instead see systemRegistriesDirPath above.
const builtinRegistriesDirPath = "/etc/containers/registries.d"
// userRegistriesDirPath is the path to the per user registries.d.
var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d")
// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
// NOTE: Keep this in sync with docs/registries.d.md!
type registryConfiguration struct {
@@ -75,14 +79,17 @@ func configuredSignatureStorageBase(sys *types.SystemContext, ref dockerReferenc
// registriesDirPath returns a path to registries.d
func registriesDirPath(sys *types.SystemContext) string {
if sys != nil {
if sys.RegistriesDirPath != "" {
return sys.RegistriesDirPath
}
if sys.RootForImplicitAbsolutePaths != "" {
return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath)
}
if sys != nil && sys.RegistriesDirPath != "" {
return sys.RegistriesDirPath
}
userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
if _, err := os.Stat(userRegistriesDirPath); err == nil {
return userRegistriesDirPath
}
if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath)
}
return systemRegistriesDirPath
}
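The change above adds a per-user ~/.config/containers/registries.d that is consulted before the system directory, while an explicit SystemContext.RegistriesDirPath still wins over both (and over RootForImplicitAbsolutePaths). An in-package sketch of the override case, with a made-up path:

func explicitRegistriesDirSkeleton() string {
	sys := &types.SystemContext{RegistriesDirPath: "/tmp/registries.d"} // hypothetical override
	return registriesDirPath(sys)                                       // returns "/tmp/registries.d" regardless of the other locations
}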

View File

@@ -1,424 +0,0 @@
package tarfile
import (
"archive/tar"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
type Destination struct {
writer io.Writer
tar *tar.Writer
repoTags []reference.NamedTagged
// Other state.
blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
config []byte
sysCtx *types.SystemContext
}
// NewDestination returns a tarfile.Destination for the specified io.Writer.
// Deprecated: please use NewDestinationWithContext instead
func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination {
return NewDestinationWithContext(nil, dest, ref)
}
// NewDestinationWithContext returns a tarfile.Destination for the specified io.Writer.
func NewDestinationWithContext(sys *types.SystemContext, dest io.Writer, ref reference.NamedTagged) *Destination {
repoTags := []reference.NamedTagged{}
if ref != nil {
repoTags = append(repoTags, ref)
}
return &Destination{
writer: dest,
tar: tar.NewWriter(dest),
repoTags: repoTags,
blobs: make(map[digest.Digest]types.BlobInfo),
sysCtx: sys,
}
}
// AddRepoTags adds the specified tags to the destination's repoTags.
func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
d.repoTags = append(d.repoTags, tags...)
}
// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
// If an empty slice or nil is returned, then any MIME type can be tried for upload.
func (d *Destination) SupportedManifestMIMETypes() []string {
return []string{
manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
}
}
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *Destination) SupportsSignatures(ctx context.Context) error {
return errors.Errorf("Storing signatures for docker tar files is not supported")
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *Destination) AcceptsForeignLayerURLs() bool {
return false
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *Destination) MustMatchRuntimeOS() bool {
return false
}
// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *Destination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
}
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *Destination) HasThreadSafePutBlob() bool {
return false
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
// Ouch, we need to stream the blob into a temporary file just to determine the size.
// When the layer is decompressed, we also have to generate the digest on the uncompressed data.
if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob")
if err != nil {
return types.BlobInfo{}, err
}
defer os.Remove(streamCopy.Name())
defer streamCopy.Close()
digester := digest.Canonical.Digester()
tee := io.TeeReader(stream, digester.Hash())
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
size, err := io.Copy(streamCopy, tee)
if err != nil {
return types.BlobInfo{}, err
}
_, err = streamCopy.Seek(0, io.SeekStart)
if err != nil {
return types.BlobInfo{}, err
}
inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
if inputInfo.Digest == "" {
inputInfo.Digest = digester.Digest()
}
stream = streamCopy
logrus.Debugf("... streaming done")
}
// Maybe the blob has been already sent
ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false)
if err != nil {
return types.BlobInfo{}, err
}
if ok {
return reusedInfo, nil
}
if isConfig {
buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream")
}
d.config = buf
if err := d.sendFile(inputInfo.Digest.Hex()+".json", inputInfo.Size, bytes.NewReader(buf)); err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error writing Config file")
}
} else {
// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
// writeLegacyLayerMetadata constructs layer IDs differently from inputInfo.Digest values (as described
// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
// in the root of the tarball.
if err := d.sendFile(inputInfo.Digest.Hex()+".tar", inputInfo.Size, stream); err != nil {
return types.BlobInfo{}, err
}
}
d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}
return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
}
if blob, ok := d.blobs[info.Digest]; ok {
return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
}
return false, types.BlobInfo{}, nil
}
func (d *Destination) createRepositoriesFile(rootLayerID string) error {
repositories := map[string]map[string]string{}
for _, repoTag := range d.repoTags {
if val, ok := repositories[repoTag.Name()]; ok {
val[repoTag.Tag()] = rootLayerID
} else {
repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): rootLayerID}
}
}
b, err := json.Marshal(repositories)
if err != nil {
return errors.Wrap(err, "Error marshaling repositories")
}
if err := d.sendBytes(legacyRepositoriesFileName, b); err != nil {
return errors.Wrap(err, "Error writing config json file")
}
return nil
}
// PutManifest writes manifest to the destination.
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
// there can be no secondary manifests.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
if instanceDigest != nil {
return errors.New(`Manifest lists are not supported for docker tar files`)
}
// We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
// so the caller trying a different manifest kind would be pointless.
var man manifest.Schema2
if err := json.Unmarshal(m, &man); err != nil {
return errors.Wrap(err, "Error parsing manifest")
}
if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
}
layerPaths, lastLayerID, err := d.writeLegacyLayerMetadata(man.LayersDescriptors)
if err != nil {
return err
}
if len(man.LayersDescriptors) > 0 {
if err := d.createRepositoriesFile(lastLayerID); err != nil {
return err
}
}
repoTags := []string{}
for _, tag := range d.repoTags {
// For github.com/docker/docker consumers, this works just as well as
// refString := ref.String()
// because when reading the RepoTags strings, github.com/docker/docker/reference
// normalizes both of them to the same value.
//
// Doing it this way to include the normalized-out `docker.io[/library]` does make
// a difference for github.com/projectatomic/docker consumers, with the
// “Add --add-registry and --block-registry options to docker daemon” patch.
// These consumers treat reference strings which include a hostname and reference
// strings without a hostname differently.
//
// Using the host name here is more explicit about the intent, and it has the same
// effect as (docker pull) in projectatomic/docker, which tags the result using
// a hostname-qualified reference.
// See https://github.com/containers/image/issues/72 for a more detailed
// analysis and explanation.
refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag())
repoTags = append(repoTags, refString)
}
items := []ManifestItem{{
Config: man.ConfigDescriptor.Digest.Hex() + ".json",
RepoTags: repoTags,
Layers: layerPaths,
Parent: "",
LayerSources: nil,
}}
itemsBytes, err := json.Marshal(&items)
if err != nil {
return err
}
// FIXME? Do we also need to support the legacy format?
return d.sendBytes(manifestFileName, itemsBytes)
}
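The RepoTags comment above turns on the difference between fully-qualified and familiar reference strings. A standalone sketch of that difference, assuming the github.com/docker/distribution/reference package (containers/image vendors an equivalent fork):
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("busybox:latest")
	if err != nil {
		panic(err)
	}
	tagged := named.(reference.NamedTagged)
	// Fully qualified form, as PutManifest writes into RepoTags above.
	fmt.Printf("%s:%s\n", tagged.Name(), tagged.Tag()) // docker.io/library/busybox:latest
	// Familiar form, as github.com/docker/docker displays it.
	fmt.Printf("%s:%s\n", reference.FamiliarName(tagged), tagged.Tag()) // busybox:latest
}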
// writeLegacyLayerMetadata writes legacy VERSION and configuration files for all layers
func (d *Destination) writeLegacyLayerMetadata(layerDescriptors []manifest.Schema2Descriptor) (layerPaths []string, lastLayerID string, err error) {
var chainID digest.Digest
lastLayerID = ""
for i, l := range layerDescriptors {
// This chainID value matches the computation in docker/docker/layer.CreateChainID …
if chainID == "" {
chainID = l.Digest
} else {
chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String())
}
// … but note that this image ID does not match docker/docker/image/v1.CreateID. At least recent
// versions allocate new IDs on load, as long as the IDs we use are unique / cannot loop.
//
// Overall, the goal of computing a digest dependent on the full history is to avoid reusing an image ID
// (and possibly creating a loop in the "parent" links) if a layer with the same DiffID appears two or more
// times in layersDescriptors. The ChainID values are sufficient for this; the v1.CreateID computation,
// which also mixes in the full image configuration, seems unnecessary, at least as long as we are storing
// only a single image per tarball, i.e. all DiffID prefixes are unique (can't differ only with
// configuration).
layerID := chainID.Hex()
physicalLayerPath := l.Digest.Hex() + ".tar"
// The layer itself has been stored into physicalLayerPath in PutBlob.
// So, use that path for layerPaths used in the non-legacy manifest.
layerPaths = append(layerPaths, physicalLayerPath)
// ... and create a symlink for the legacy format;
if err := d.sendSymlink(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
return nil, "", errors.Wrap(err, "Error creating layer symbolic link")
}
b := []byte("1.0")
if err := d.sendBytes(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
return nil, "", errors.Wrap(err, "Error writing VERSION file")
}
// The legacy format requires a config file per layer
layerConfig := make(map[string]interface{})
layerConfig["id"] = layerID
// The root layer doesn't have any parent
if lastLayerID != "" {
layerConfig["parent"] = lastLayerID
}
// The top layer's configuration file is generated using a subset of the image configuration
if i == len(layerDescriptors)-1 {
var config map[string]*json.RawMessage
err := json.Unmarshal(d.config, &config)
if err != nil {
return nil, "", errors.Wrap(err, "Error unmarshaling config")
}
for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
layerConfig[attr] = config[attr]
}
}
b, err := json.Marshal(layerConfig)
if err != nil {
return nil, "", errors.Wrap(err, "Error marshaling layer config")
}
if err := d.sendBytes(filepath.Join(layerID, legacyConfigFileName), b); err != nil {
return nil, "", errors.Wrap(err, "Error writing config json file")
}
lastLayerID = layerID
}
return layerPaths, lastLayerID, nil
}
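The chainID accumulation in writeLegacyLayerMetadata mirrors docker/docker/layer.CreateChainID. A minimal standalone sketch of the same computation, assuming only github.com/opencontainers/go-digest:
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// chainID folds a list of layer digests into a single chain ID, exactly as
// the loop in writeLegacyLayerMetadata does for each prefix of the list.
func chainID(layerDigests []digest.Digest) digest.Digest {
	var id digest.Digest
	for _, d := range layerDigests {
		if id == "" {
			id = d
		} else {
			id = digest.Canonical.FromString(id.String() + " " + d.String())
		}
	}
	return id
}

func main() {
	a := digest.Canonical.FromString("layer-a")
	b := digest.Canonical.FromString("layer-b")
	fmt.Println(chainID([]digest.Digest{a, b}))
}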
type tarFI struct {
path string
size int64
isSymlink bool
}
func (t *tarFI) Name() string {
return t.path
}
func (t *tarFI) Size() int64 {
return t.size
}
func (t *tarFI) Mode() os.FileMode {
if t.isSymlink {
return os.ModeSymlink
}
return 0444
}
func (t *tarFI) ModTime() time.Time {
return time.Unix(0, 0)
}
func (t *tarFI) IsDir() bool {
return false
}
func (t *tarFI) Sys() interface{} {
return nil
}
// sendSymlink sends a symlink into the tar stream.
func (d *Destination) sendSymlink(path string, target string) error {
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
if err != nil {
return err
}
logrus.Debugf("Sending as tar link %s -> %s", path, target)
return d.tar.WriteHeader(hdr)
}
// sendBytes sends the bytes b as a file at the given path into the tar stream.
func (d *Destination) sendBytes(path string, b []byte) error {
return d.sendFile(path, int64(len(b)), bytes.NewReader(b))
}
// sendFile sends a file into the tar stream.
func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader) error {
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
if err != nil {
return err
}
logrus.Debugf("Sending as tar file %s", path)
if err := d.tar.WriteHeader(hdr); err != nil {
return err
}
// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
size, err := io.Copy(d.tar, stream)
if err != nil {
return err
}
if size != expectedSize {
return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
}
return nil
}
// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
if instanceDigest != nil {
return errors.Errorf(`Manifest lists are not supported for docker tar files`)
}
if len(signatures) != 0 {
return errors.Errorf("Storing signatures for docker tar files is not supported")
}
return nil
}
// Commit finishes writing data to the underlying io.Writer.
// It is the caller's responsibility to close it, if necessary.
func (d *Destination) Commit(ctx context.Context) error {
return d.tar.Close()
}

View File

@@ -1,3 +0,0 @@
// Package tarfile is an internal implementation detail of some transports.
// Do not use outside of the github.com/containers/image repo!
package tarfile

View File

@@ -183,7 +183,12 @@ func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (temp
src := ref.resolvedFile
dst := tempDirRef.tempDirectory
// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
if err := archive.UntarPath(src, dst); err != nil {
arch, err := os.Open(src)
if err != nil {
return tempDirOCIRef{}, err
}
defer arch.Close()
if err := archive.NewDefaultArchiver().Untar(arch, dst, &archive.TarOptions{NoLchown: true}); err != nil {
if err := tempDirRef.deleteTempDir(); err != nil {
return tempDirOCIRef{}, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
}

View File

@@ -141,6 +141,10 @@ func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *dige
}
func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
if len(urls) == 0 {
return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
}
errWrap := errors.New("failed fetching external blob from all urls")
for _, url := range urls {

View File

@@ -11,9 +11,9 @@ import (
"strings"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
helperclient "github.com/docker/docker-credential-helpers/client"
"github.com/docker/docker-credential-helpers/credentials"
"github.com/docker/docker/pkg/homedir"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -345,7 +345,7 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
return errors.Wrapf(err, "error marshaling JSON %q", path)
}
if err = ioutil.WriteFile(path, newData, 0755); err != nil {
if err = ioutil.WriteFile(path, newData, 0600); err != nil {
return errors.Wrapf(err, "error writing to file %q", path)
}
}

View File

@@ -338,55 +338,86 @@ func (config *V2RegistriesConf) postProcess() error {
}
// ConfigPath returns the path to the system-wide registry configuration file.
// Deprecated: This API implies configuration is read from files, and that there is only one.
// Please use ConfigurationSourceDescription to obtain a string usable for error messages.
func ConfigPath(ctx *types.SystemContext) string {
if ctx != nil && ctx.SystemRegistriesConfPath != "" {
return ctx.SystemRegistriesConfPath
}
userRegistriesFilePath := filepath.Join(homedir.Get(), userRegistriesFile)
if _, err := os.Stat(userRegistriesFilePath); err == nil {
return userRegistriesFilePath
}
if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
}
return systemRegistriesConfPath
return newConfigWrapper(ctx).configPath
}
// ConfigDirPath returns the path to the system-wide directory for drop-in
// ConfigDirPath returns the path to the directory for drop-in
// registry configuration files.
// Deprecated: This API implies configuration is read from directories, and that there is only one.
// Please use ConfigurationSourceDescription to obtain a string usable for error messages.
func ConfigDirPath(ctx *types.SystemContext) string {
if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
return ctx.SystemRegistriesConfDirPath
configWrapper := newConfigWrapper(ctx)
if configWrapper.userConfigDirPath != "" {
return configWrapper.userConfigDirPath
}
userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
if _, err := os.Stat(userRegistriesDirPath); err == nil {
return userRegistriesDirPath
}
if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfDirPath)
}
return systemRegistriesConfDirPath
return configWrapper.configDirPath
}
// configWrapper is used to store the paths from ConfigPath and ConfigDirPath
// and acts as a key to the internal cache.
type configWrapper struct {
configPath string
// path to the registries.conf file
configPath string
// path to system-wide registries.conf.d directory, or "" if not used
configDirPath string
// path to user-specified registries.conf.d directory, or "" if not used
userConfigDirPath string
}
// newConfigWrapper returns a configWrapper for the specified SystemContext.
func newConfigWrapper(ctx *types.SystemContext) configWrapper {
return configWrapper{
configPath: ConfigPath(ctx),
configDirPath: ConfigDirPath(ctx),
var wrapper configWrapper
userRegistriesFilePath := filepath.Join(homedir.Get(), userRegistriesFile)
userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
// decide configPath using per-user path or system file
if ctx != nil && ctx.SystemRegistriesConfPath != "" {
wrapper.configPath = ctx.SystemRegistriesConfPath
} else if _, err := os.Stat(userRegistriesFilePath); err == nil {
// per-user registries.conf exists, not reading system dir
// return config dirs from ctx or per-user one
wrapper.configPath = userRegistriesFilePath
if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
wrapper.configDirPath = ctx.SystemRegistriesConfDirPath
} else {
wrapper.userConfigDirPath = userRegistriesDirPath
}
return wrapper
} else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
wrapper.configPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
} else {
wrapper.configPath = systemRegistriesConfPath
}
// potentially use both system and per-user dirs if not using per-user config file
if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
// dir explicitly chosen: use only that one
wrapper.configDirPath = ctx.SystemRegistriesConfDirPath
} else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
wrapper.configDirPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfDirPath)
wrapper.userConfigDirPath = userRegistriesDirPath
} else {
wrapper.configDirPath = systemRegistriesConfDirPath
wrapper.userConfigDirPath = userRegistriesDirPath
}
return wrapper
}
// ConfigurationSourceDescription returns a string containing the paths of registries.conf and registries.conf.d
func ConfigurationSourceDescription(ctx *types.SystemContext) string {
wrapper := newConfigWrapper(ctx)
configSources := []string{wrapper.configPath}
if wrapper.configDirPath != "" {
configSources = append(configSources, wrapper.configDirPath)
}
if wrapper.userConfigDirPath != "" {
configSources = append(configSources, wrapper.userConfigDirPath)
}
return strings.Join(configSources, ", ")
}
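For context on the exported helpers above, a small usage sketch; the import paths are the ones this diff touches, and the registries.conf path is just an example:
package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/sysregistriesv2"
	"github.com/containers/image/v5/types"
)

func main() {
	// Point at an explicit registries.conf; the per-user and drop-in paths
	// are then resolved by newConfigWrapper as described above.
	sys := &types.SystemContext{SystemRegistriesConfPath: "/tmp/registries.conf"}
	fmt.Println(sysregistriesv2.ConfigPath(sys))
	fmt.Println(sysregistriesv2.ConfigurationSourceDescription(sys))
}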
// configMutex is used to synchronize concurrent accesses to configCache.
@@ -422,39 +453,49 @@ func getConfig(ctx *types.SystemContext) (*V2RegistriesConf, error) {
// dropInConfigs returns a slice of drop-in-configs from the registries.conf.d
// directory.
func dropInConfigs(wrapper configWrapper) ([]string, error) {
var configs []string
err := filepath.Walk(wrapper.configDirPath,
// WalkFunc to read additional configs
func(path string, info os.FileInfo, err error) error {
switch {
case err != nil:
// return error (could be a permission problem)
return err
case info == nil:
// this should only happen when err != nil but let's be sure
return nil
case info.IsDir():
if path != wrapper.configDirPath {
// make sure to not recurse into sub-directories
return filepath.SkipDir
}
// ignore directories
return nil
default:
// only add *.conf files
if strings.HasSuffix(path, ".conf") {
configs = append(configs, path)
}
return nil
}
},
var (
configs []string
dirPaths []string
)
if wrapper.configDirPath != "" {
dirPaths = append(dirPaths, wrapper.configDirPath)
}
if wrapper.userConfigDirPath != "" {
dirPaths = append(dirPaths, wrapper.userConfigDirPath)
}
for _, dirPath := range dirPaths {
err := filepath.Walk(dirPath,
// WalkFunc to read additional configs
func(path string, info os.FileInfo, err error) error {
switch {
case err != nil:
// return error (could be a permission problem)
return err
case info == nil:
// this should only happen when err != nil but let's be sure
return nil
case info.IsDir():
if path != dirPath {
// make sure to not recurse into sub-directories
return filepath.SkipDir
}
// ignore directories
return nil
default:
// only add *.conf files
if strings.HasSuffix(path, ".conf") {
configs = append(configs, path)
}
return nil
}
},
)
if err != nil && !os.IsNotExist(err) {
// Ignore IsNotExist errors: most systems won't have a registries.conf.d
// directory.
return nil, errors.Wrapf(err, "error reading registries.conf.d")
if err != nil && !os.IsNotExist(err) {
// Ignore IsNotExist errors: most systems won't have a registries.conf.d
// directory.
return nil, errors.Wrapf(err, "error reading registries.conf.d")
}
}
return configs, nil
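The walk above skips sub-directories and keeps only *.conf files from each drop-in directory. The same pattern as a self-contained sketch (the directory path is an example):
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// collectConfs gathers *.conf files from one drop-in directory without
// recursing into sub-directories, like dropInConfigs does for each dirPath.
func collectConfs(dirPath string) ([]string, error) {
	var configs []string
	err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			if path != dirPath {
				return filepath.SkipDir
			}
			return nil
		}
		if strings.HasSuffix(path, ".conf") {
			configs = append(configs, path)
		}
		return nil
	})
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	return configs, nil
}

func main() {
	confs, _ := collectConfs("/etc/containers/registries.conf.d")
	fmt.Println(confs)
}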

View File

@@ -6,9 +6,9 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 5
VersionMinor = 6
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 1
VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""

vendor/github.com/containers/ocicrypt/.travis.yml (generated, vendored, new file, +22 lines)
View File

@@ -0,0 +1,22 @@
dist: xenial
language: go
os:
- linux
go:
- "1.13.x"
matrix:
include:
- os: linux
go_import_path: github.com/containers/ocicrypt
install:
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.19.1
script:
- make
- make check
- make test

vendor/github.com/containers/ocicrypt/SECURITY.md (generated, vendored, new file, +3 lines)
View File

@@ -0,0 +1,3 @@
## Security and Disclosure Information Policy for the OCIcrypt Library Project
The OCIcrypt Library Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects.

View File

@@ -3,15 +3,13 @@ module github.com/containers/ocicrypt
go 1.12
require (
github.com/containerd/containerd v1.2.10
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/image-spec v1.0.1
github.com/pkg/errors v0.8.1
github.com/sirupsen/logrus v1.4.2 // indirect
github.com/stretchr/testify v1.3.0 // indirect
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
google.golang.org/grpc v1.24.0 // indirect
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 // indirect
gopkg.in/square/go-jose.v2 v2.3.1
gotest.tools v2.2.0+incompatible // indirect
)

View File

@@ -1,23 +1,7 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/containerd/containerd v1.2.10 h1:liQDhXqIn7y6cJ/7qBgOaZsiTZJc56/wkkhDBiDBRDw=
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
@@ -26,22 +10,16 @@ github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -49,15 +27,5 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/p
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s=
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@@ -170,7 +170,7 @@ func (gc *gpgv2Client) getKeyDetails(option string, keyid uint64) ([]byte, bool,
var args []string
if gc.gpgHomeDir != "" {
args = append([]string{"--homedir", gc.gpgHomeDir})
args = []string{"--homedir", gc.gpgHomeDir}
}
args = append(args, option, fmt.Sprintf("0x%x", keyid))
@@ -229,7 +229,7 @@ func (gc *gpgv1Client) getKeyDetails(option string, keyid uint64) ([]byte, bool,
var args []string
if gc.gpgHomeDir != "" {
args = append([]string{"--homedir", gc.gpgHomeDir})
args = []string{"--homedir", gc.gpgHomeDir}
}
args = append(args, option, fmt.Sprintf("0x%x", keyid))

View File

@@ -1,19 +1,16 @@
package helpers
import (
"context"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/containerd/containerd/platforms"
"github.com/containers/ocicrypt"
encconfig "github.com/containers/ocicrypt/config"
encutils "github.com/containers/ocicrypt/utils"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
@@ -66,6 +63,23 @@ func processRecipientKeys(recipients []string) ([][]byte, [][]byte, [][]byte, er
return gpgRecipients, pubkeys, x509s, nil
}
// processx509Certs processes x509 certificate files
func processx509Certs(keys []string) ([][]byte, error) {
var x509s [][]byte
for _, key := range keys {
tmp, err := ioutil.ReadFile(key)
if err != nil {
return nil, errors.Wrap(err, "Unable to read file")
}
if !encutils.IsCertificate(tmp) {
continue
}
x509s = append(x509s, tmp)
}
return x509s, nil
}
// processPwdString processes a password that may be in any of the following formats:
// - file=<passwordfile>
// - pass=<password>
@@ -141,33 +155,14 @@ func processPrivateKeyFiles(keyFilesAndPwds []string) ([][]byte, [][]byte, [][]b
gpgSecretKeyRingFiles = append(gpgSecretKeyRingFiles, tmp)
gpgSecretKeyPasswords = append(gpgSecretKeyPasswords, password)
} else {
return nil, nil, nil, nil, fmt.Errorf("unidentified private key in file %s (password=%s)", keyfile, string(password))
// ignore if file is not recognized, so as not to error if additional
// metadata/cert files exist
continue
}
}
return gpgSecretKeyRingFiles, gpgSecretKeyPasswords, privkeys, privkeysPasswords, nil
}
func createGPGClient(context context.Context) (ocicrypt.GPGClient, error) {
return ocicrypt.NewGPGClient(context.Value("gpg-version").(string), context.Value("gpg-homedir").(string))
}
func getGPGPrivateKeys(context context.Context, gpgSecretKeyRingFiles [][]byte, descs []ocispec.Descriptor, mustFindKey bool) (gpgPrivKeys [][]byte, gpgPrivKeysPwds [][]byte, err error) {
gpgClient, err := createGPGClient(context)
if err != nil {
return nil, nil, err
}
var gpgVault ocicrypt.GPGVault
if len(gpgSecretKeyRingFiles) > 0 {
gpgVault = ocicrypt.NewGPGVault()
err = gpgVault.AddSecretKeyRingDataArray(gpgSecretKeyRingFiles)
if err != nil {
return nil, nil, err
}
}
return ocicrypt.GPGGetPrivateKey(descs, gpgClient, gpgVault, mustFindKey)
}
// CreateDecryptCryptoConfig creates the CryptoConfig object that contains the necessary
// information to perform decryption from command line options and possibly
// LayerInfos describing the image and helping us to query for the PGP decryption keys
@@ -180,6 +175,13 @@ func CreateDecryptCryptoConfig(keys []string, decRecipients []string) (encconfig
return encconfig.CryptoConfig{}, err
}
// x509 certs can also be passed in via keys
x509FromKeys, err := processx509Certs(keys)
if err != nil {
return encconfig.CryptoConfig{}, err
}
x509s = append(x509s, x509FromKeys...)
gpgSecretKeyRingFiles, gpgSecretKeyPasswords, privKeys, privKeysPasswords, err := processPrivateKeyFiles(keys)
if err != nil {
return encconfig.CryptoConfig{}, err
@@ -236,20 +238,6 @@ func CreateDecryptCryptoConfig(keys []string, decRecipients []string) (encconfig
return encconfig.CombineCryptoConfigs(ccs), nil
}
// parsePlatformArray parses an array of specifiers and converts them into an array of specs.Platform
func parsePlatformArray(specifiers []string) ([]ocispec.Platform, error) {
var speclist []ocispec.Platform
for _, specifier := range specifiers {
spec, err := platforms.Parse(specifier)
if err != nil {
return []ocispec.Platform{}, err
}
speclist = append(speclist, spec)
}
return speclist, nil
}
// CreateCryptoConfig from the list of recipient strings and list of key paths of private keys
func CreateCryptoConfig(recipients []string, keys []string) (encconfig.CryptoConfig, error) {
var decryptCc *encconfig.CryptoConfig

View File

@@ -23,8 +23,8 @@ import (
"github.com/containers/ocicrypt/config"
"github.com/containers/ocicrypt/keywrap"
"github.com/containers/ocicrypt/utils"
"github.com/fullsailor/pkcs7"
"github.com/pkg/errors"
"go.mozilla.org/pkcs7"
)
type pkcs7KeyWrapper struct {

View File

@@ -19,12 +19,12 @@ env:
###
FEDORA_NAME: "fedora-32"
PRIOR_FEDORA_NAME: "fedora-31"
UBUNTU_NAME: "ubuntu-19"
PRIOR_UBUNTU_NAME: "ubuntu-18"
UBUNTU_NAME: "ubuntu-20"
PRIOR_UBUNTU_NAME: "ubuntu-19"
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
_BUILT_IMAGE_SUFFIX: "libpod-6224667180531712" # From the packer output of 'build_vm_images_script'
_BUILT_IMAGE_SUFFIX: "libpod-6508632441356288"
FEDORA_CACHE_IMAGE_NAME: "${FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "${PRIOR_FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "${UBUNTU_NAME}-${_BUILT_IMAGE_SUFFIX}"
@@ -62,8 +62,10 @@ testing_task:
- lint
# Not all $TEST_DRIVER combinations are valid for all OS types.
# Note: Nested-variable resolution happens at runtime, not eval. time.
# Use verbose logic for ease of reading/maintaining.
# N/B: As of the addition of this note, nested-variable resolution
# does not happen for boolean `only_if` expressions. Since $VM_IMAGE
# contains nested variables, we must filter based on that and not the
# actual distro/version value.
only_if: >-
( $VM_IMAGE =~ '.*UBUNTU.*' && $TEST_DRIVER == "vfs" ) ||
( $VM_IMAGE =~ '.*UBUNTU.*' && $TEST_DRIVER == "aufs" ) ||
@@ -104,7 +106,7 @@ lint_task:
env:
CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage"
container:
image: golang:1.12
image: golang:1.15
modules_cache:
fingerprint_script: cat go.sum
folder: $GOPATH/pkg/mod
@@ -140,9 +142,21 @@ meta_task:
vendor_task:
container:
image: golang:1.13
image: golang:1.15
modules_cache:
fingerprint_script: cat go.sum
folder: $GOPATH/pkg/mod
build_script: make vendor
test_script: hack/tree_status.sh
# Represent overall pass/fail status from required dependent tasks
success_task:
depends_on:
- lint
- testing
- meta
- vendor
container:
image: golang:1.15
clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed
script: /bin/true

View File

@@ -1 +1 @@
1.20.2
1.23.5

View File

@@ -143,10 +143,6 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
}
dstPath := filepath.Join(dstDir, relPath)
if err != nil {
return err
}
stat, ok := f.Sys().(*syscall.Stat_t)
if !ok {
return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)

View File

@@ -51,6 +51,10 @@ func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int {
if c.checker.IsMounted(path) {
m.count++
}
} else if !c.checker.IsMounted(path) {
// if the unmount was performed outside of this process (e.g. conmon cleanup)
// the ref counter would lose track of it. Check if it is still mounted.
m.count = 0
}
infoOp(m)
count := m.count

View File

@@ -23,6 +23,7 @@ type directLVMConfig struct {
ThinpMetaPercent uint64
AutoExtendPercent uint64
AutoExtendThreshold uint64
MetaDataSize string
}
var (
@@ -121,15 +122,19 @@ func checkDevHasFS(dev string) error {
}
func verifyBlockDevice(dev string, force bool) error {
realPath, err := filepath.Abs(dev)
absPath, err := filepath.Abs(dev)
if err != nil {
return errors.Errorf("unable to get absolute path for %s: %s", dev, err)
}
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
realPath, err := filepath.EvalSymlinks(absPath)
if err != nil {
return errors.Errorf("failed to canonicalise path for %s: %s", dev, err)
}
if err := checkDevAvailable(realPath); err != nil {
return err
if err := checkDevAvailable(absPath); err != nil {
logrus.Infof("block device '%s' not available, checking '%s'", absPath, realPath)
if err := checkDevAvailable(realPath); err != nil {
return errors.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device.", absPath, realPath)
}
}
if err := checkDevInVG(realPath); err != nil {
return err
@@ -205,8 +210,11 @@ func setupDirectLVM(cfg directLVMConfig) error {
if cfg.ThinpMetaPercent == 0 {
cfg.ThinpMetaPercent = 1
}
if cfg.MetaDataSize == "" {
cfg.MetaDataSize = "128k"
}
out, err := exec.Command("pvcreate", "-f", cfg.Device).CombinedOutput()
out, err := exec.Command("pvcreate", "--metadatasize", cfg.MetaDataSize, "-f", cfg.Device).CombinedOutput()
if err != nil {
return errors.Wrap(err, string(out))
}

View File

@@ -101,6 +101,7 @@ type DeviceSet struct {
// Options
dataLoopbackSize int64
metaDataSize string
metaDataLoopbackSize int64
baseFsSize uint64
filesystem string
@@ -1748,7 +1749,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
// - Managed by container storage
// - The target of this device is at major <maj> and minor <min>
// - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(st.Dev), minor(st.Dev), st.Ino)
devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(uint64(st.Dev)), minor(uint64(st.Dev)), st.Ino)
logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix)
// Check for the existence of the thin-pool device
@@ -2708,6 +2709,8 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
devices.mountOptions = joinMountOptions(devices.mountOptions, val)
case "dm.metadatadev":
devices.metadataDevice = val
case "dm.metadata_size":
devices.metaDataSize = val
case "dm.datadev":
devices.dataDevice = val
case "dm.thinpooldev":
@@ -2743,6 +2746,8 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
return nil, err
}
case "dm.metaDataSize":
lvmSetupConfig.MetaDataSize = val
case "dm.min_free_space":
if !strings.HasSuffix(val, "%") {
return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix")

View File

@@ -274,22 +274,28 @@ func parseOptions(options []string) (*overlayOptions, error) {
if err != nil {
return nil, err
}
key = strings.ToLower(key)
switch key {
case ".override_kernel_check", "overlay.override_kernel_check", "overlay2.override_kernel_check":
trimkey := strings.ToLower(key)
trimkey = strings.TrimPrefix(trimkey, "overlay.")
trimkey = strings.TrimPrefix(trimkey, "overlay2.")
trimkey = strings.TrimPrefix(trimkey, ".")
switch trimkey {
case "override_kernel_check":
logrus.Warnf("overlay: override_kernel_check option was specified, but is no longer necessary")
case ".mountopt", "overlay.mountopt", "overlay2.mountopt":
case "mountopt":
o.mountOptions = val
case ".size", "overlay.size", "overlay2.size":
case "size":
logrus.Debugf("overlay: size=%s", val)
size, err := units.RAMInBytes(val)
if err != nil {
return nil, err
}
o.quota.Size = uint64(size)
case ".imagestore", "overlay.imagestore", "overlay2.imagestore":
case "imagestore", "additionalimagestore":
logrus.Debugf("overlay: imagestore=%s", val)
// Additional read only image stores to use for lower paths
if val == "" {
continue
}
for _, store := range strings.Split(val, ",") {
store = filepath.Clean(store)
if !filepath.IsAbs(store) {
@@ -304,17 +310,17 @@ func parseOptions(options []string) (*overlayOptions, error) {
}
o.imageStores = append(o.imageStores, store)
}
case ".mount_program", "overlay.mount_program", "overlay2.mount_program":
case "mount_program":
logrus.Debugf("overlay: mount_program=%s", val)
_, err := os.Stat(val)
if err != nil {
return nil, fmt.Errorf("overlay: can't stat program %s: %v", val, err)
}
o.mountProgram = val
case "overlay2.skip_mount_home", "overlay.skip_mount_home", ".skip_mount_home":
case "skip_mount_home":
logrus.Debugf("overlay: skip_mount_home=%s", val)
o.skipMountHome, err = strconv.ParseBool(val)
case ".ignore_chown_errors", "overlay2.ignore_chown_errors", "overlay.ignore_chown_errors":
case "ignore_chown_errors":
logrus.Debugf("overlay: ignore_chown_errors=%s", val)
o.ignoreChownErrors, err = strconv.ParseBool(val)
if err != nil {
@@ -892,19 +898,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
}
// If the lowers list is still empty, use an empty lower so that we can still force an
// SELinux context for the mount.
// if we are doing a readOnly mount, and there is only one lower
// We should just return the lower directory, no reason to mount.
if !readWrite && d.options.mountProgram == "" {
if len(absLowers) == 0 {
return path.Join(dir, "empty"), nil
}
if len(absLowers) == 1 {
return absLowers[0], nil
}
}
if len(absLowers) == 0 {
absLowers = append(absLowers, path.Join(dir, "empty"))
relLowers = append(relLowers, path.Join(id, "empty"))
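Returning to the option-key trimming in parseOptions above: after the refactor, the prefixed and unprefixed spellings of each overlay option collapse onto one switch case. A small sketch of the normalization, with example keys:
package main

import (
	"fmt"
	"strings"
)

// normalizeKey reproduces the prefix trimming now done in parseOptions.
func normalizeKey(key string) string {
	k := strings.ToLower(key)
	k = strings.TrimPrefix(k, "overlay.")
	k = strings.TrimPrefix(k, "overlay2.")
	k = strings.TrimPrefix(k, ".")
	return k
}

func main() {
	for _, key := range []string{"overlay.mount_program", "overlay2.size", ".ignore_chown_errors"} {
		fmt.Println(key, "->", normalizeKey(key))
	}
}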

View File

@@ -1,3 +1,5 @@
go 1.15
module github.com/containers/storage
require (
@@ -5,25 +7,24 @@ require (
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5
github.com/Microsoft/hcsshim v0.8.9
github.com/docker/go-units v0.4.0
github.com/hashicorp/go-multierror v1.0.0
github.com/klauspost/compress v1.10.7
github.com/klauspost/pgzip v1.2.4
github.com/hashicorp/go-multierror v1.1.0
github.com/klauspost/compress v1.11.0
github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-shellwords v1.0.10
github.com/mistifyio/go-zfs v2.1.1+incompatible
github.com/moby/sys/mountinfo v0.1.3
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/runc v1.0.0-rc90
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700
github.com/opencontainers/selinux v1.5.2
github.com/opencontainers/runc v1.0.0-rc91
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
github.com/opencontainers/selinux v1.6.0
github.com/pkg/errors v0.9.1
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7
github.com/sirupsen/logrus v1.6.0
github.com/stretchr/testify v1.6.0
github.com/stretchr/testify v1.6.1
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
github.com/tchap/go-patricia v2.3.0+incompatible
github.com/vbatts/tar-split v0.11.1
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775
gotest.tools v2.2.0+incompatible
)
go 1.13

View File

@@ -5,11 +5,17 @@ github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6tr
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/checkpoint-restore/go-criu/v4 v4.0.2 h1:jt+rnBIhFtPw0fhtpYGcUOilh4aO9Hj7r+YLEtf30uA=
github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3 h1:qcqzLJa2xCo9sgdCzpT/SJSYxROTEstuhf7ZBHMirms=
github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1 h1:uict5mhHFTzKLUCufdSLym7z/J0CbBJT59lYbP9wtbg=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v1.0.0 h1:fU3UuQapBs+zLJu82NhR11Rif1ny2zfMMAyPJzSN5tQ=
github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
@@ -18,6 +24,12 @@ github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDG
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -25,6 +37,8 @@ github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
@@ -34,22 +48,24 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.10.7 h1:7rix8v8GpI3ZBb0nSozFRgbtXKv+hOe+qfEpZqybrAg=
github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg=
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -62,16 +78,23 @@ github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvO
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/moby/sys/mountinfo v0.1.3 h1:KIrhRO14+AkwKvG/g2yIpNMOUVZ02xNhOw8KY1WsLOI=
github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 h1:7InQ7/zrOh6SlFjaXFubv0xX0HsuC9qJsdqm7bNQpYM=
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc90 h1:4+xo8mtWixbHoEm451+WJNUrq12o2/tDsyK9Vgc/NcA=
github.com/opencontainers/runc v1.0.0-rc90/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc91 h1:Tp8LWs5G8rFpzTsbRjAtQkPVexhCu0bnANE5IfIhJ6g=
github.com/opencontainers/runc v1.0.0-rc91/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 h1:eNUVfm/RFLIi1G7flU5/ZRTHvd4kcVuzfRnL6OFlzCI=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.5.2 h1:F6DgIsjgBIcDksLW4D5RG9bXok6oqZ3nvMwj4ZoFu/Q=
github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E=
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K6Jx62mY=
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -80,6 +103,12 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 h1:gGBSHPOU7g8YjTbhwn+lvFm2VDEhhA+PwDIlstkgSxE=
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
@@ -89,16 +118,24 @@ github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.0 h1:jlIyCplCJFULU/01vCkhKuTyc3OorI3bJFuw6obfgho=
github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5 h1:MCfT24H3f//U5+UCrZp1/riVO3B50BovxtDiNn0XKkk=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243 h1:R43TdZy32XXSXjJn7M/HhALJ9imq6ztLnChfYJpVDnM=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -124,10 +161,14 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775 h1:TC0v2RSO1u2kn1ZugjrFXkRZAEaqMN/RW+OTZkBzmLE=
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -137,6 +178,8 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=

View File

@@ -10,6 +10,7 @@ import (
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/stringutils"
"github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@@ -465,6 +466,19 @@ func (r *imageStore) addMappedTopLayer(id, layer string) error {
return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
}
func (r *imageStore) removeMappedTopLayer(id, layer string) error {
if image, ok := r.lookup(id); ok {
initialLen := len(image.MappedTopLayers)
image.MappedTopLayers = stringutils.RemoveFromSlice(image.MappedTopLayers, layer)
// No layer was removed. No need to save.
if initialLen == len(image.MappedTopLayers) {
return nil
}
return r.Save()
}
return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
}
func (r *imageStore) Metadata(id string) (string, error) {
if image, ok := r.lookup(id); ok {
return image.Metadata, nil

View File

@@ -772,7 +772,20 @@ func (r *layerStore) Mounted(id string) (int, error) {
}
func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) {
if !r.IsReadWrite() {
// check whether options include ro option
hasReadOnlyOpt := func(opts []string) bool {
for _, item := range opts {
if item == "ro" {
return true
}
}
return false
}
// You are not allowed to mount layers from readonly stores if they
// are not mounted read/only.
if !r.IsReadWrite() && !hasReadOnlyOpt(options.Options) {
return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
}
r.mountsLockfile.Lock()
@@ -1000,6 +1013,7 @@ func (r *layerStore) deleteInternal(id string) error {
if layer.MountPoint != "" {
delete(r.bymount, layer.MountPoint)
}
r.deleteInDigestMap(id)
toDeleteIndex := -1
for i, candidate := range r.layers {
if candidate.ID == id {
@@ -1031,6 +1045,27 @@ func (r *layerStore) deleteInternal(id string) error {
return err
}
func (r *layerStore) deleteInDigestMap(id string) {
for digest, layers := range r.bycompressedsum {
for i, layerID := range layers {
if layerID == id {
layers = append(layers[:i], layers[i+1:]...)
r.bycompressedsum[digest] = layers
break
}
}
}
for digest, layers := range r.byuncompressedsum {
for i, layerID := range layers {
if layerID == id {
layers = append(layers[:i], layers[i+1:]...)
r.byuncompressedsum[digest] = layers
break
}
}
}
}
func (r *layerStore) Delete(id string) error {
layer, ok := r.lookup(id)
if !ok {
@@ -1307,6 +1342,7 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
if err != nil {
return -1, err
}
defer idLogger.Close()
payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, io.MultiWriter(uncompressedCounter, idLogger)), metadata, storage.NewDiscardFilePutter())
if err != nil {
return -1, err
@@ -1321,7 +1357,6 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
return -1, err
}
compressor.Close()
idLogger.Close()
if err == nil {
if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil {
return -1, err

View File

@@ -390,16 +390,20 @@ func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
return mode
}
// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
// to a tar header
// ReadSecurityXattrToTarHeader reads the security.capability and security.ima
// xattrs from the filesystem into a tar header
func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
capability, err := system.Lgetxattr(path, "security.capability")
if err != nil && err != system.EOPNOTSUPP && err != system.ErrNotSupportedPlatform {
return err
}
if capability != nil {
if hdr.Xattrs == nil {
hdr.Xattrs = make(map[string]string)
hdr.Xattrs["security.capability"] = string(capability)
}
for _, xattr := range []string{"security.capability", "security.ima"} {
capability, err := system.Lgetxattr(path, xattr)
if err != nil && err != system.EOPNOTSUPP && err != system.ErrNotSupportedPlatform {
return errors.Wrapf(err, "failed to read %q attribute from %q", xattr, path)
}
if capability != nil {
hdr.Xattrs[xattr] = string(capability)
}
}
return nil
}
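The system.Lgetxattr wrapper used above is not part of this diff; assuming it behaves like a grow-the-buffer wrapper around the raw syscall, here is a sketch using golang.org/x/sys/unix directly (the lgetxattr helper and the /usr/bin/ping path are illustrative, not the library's API):

//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// lgetxattr grows the buffer until the value fits and returns nil when the
// attribute is absent.
func lgetxattr(path, attr string) ([]byte, error) {
	buf := make([]byte, 128)
	for {
		sz, err := unix.Lgetxattr(path, attr, buf)
		switch {
		case err == unix.ENODATA:
			return nil, nil // attribute not set
		case err == unix.ERANGE:
			buf = make([]byte, len(buf)*2) // value larger than buffer, retry
		case err != nil:
			return nil, err
		default:
			return buf[:sz], nil
		}
	}
}

func main() {
	for _, attr := range []string{"security.capability", "security.ima"} {
		val, err := lgetxattr("/usr/bin/ping", attr)
		fmt.Println(attr, len(val), err)
	}
}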
@@ -598,7 +602,7 @@ func (ta *tarAppender) addTarFile(path, name string) error {
return nil
}
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool) error {
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, buffer []byte) error {
// hdr.Mode is in linux format, which we can use for syscalls,
// but for os.Foo() calls we need the mode converted to os.FileMode,
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
@@ -622,7 +626,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
if err != nil {
return err
}
if _, err := io.Copy(file, reader); err != nil {
if _, err := io.CopyBuffer(file, reader, buffer); err != nil {
file.Close()
return err
}
@@ -938,6 +942,7 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
rootIDs := idMappings.RootPair()
whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
buffer := make([]byte, 1<<20)
// Iterate through the files in the archive.
loop:
@@ -1034,7 +1039,7 @@ loop:
chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
}
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS, options.IgnoreChownErrors); err != nil {
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS, options.IgnoreChownErrors, buffer); err != nil {
return err
}
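The new buffer parameter lets Unpack allocate one 1 MiB buffer per archive and reuse it for every file, where io.Copy would otherwise allocate its own 32 KiB buffer on each createTarFile call. A minimal sketch of the io.CopyBuffer reuse pattern:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	// One buffer per archive, threaded through every file extraction.
	buffer := make([]byte, 1<<20)

	files := []string{"first file contents", "second file contents"}
	for _, contents := range files {
		var dst bytes.Buffer // stands in for the file being extracted
		if _, err := io.CopyBuffer(&dst, strings.NewReader(contents), buffer); err != nil {
			panic(err)
		}
		fmt.Println(dst.Len(), "bytes written")
	}
}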

View File

@@ -18,9 +18,11 @@ func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.Sta
if cuid, cgid, err := newInfo.idMappings.ToContainer(idtools.IDPair{UID: int(uid), GID: int(gid)}); err == nil {
uid = uint32(cuid)
gid = uint32(cgid)
if oldcuid, oldcgid, err := oldInfo.idMappings.ToContainer(idtools.IDPair{UID: int(oldUID), GID: int(oldGID)}); err == nil {
oldUID = uint32(oldcuid)
oldGID = uint32(oldcgid)
if oldInfo != nil {
if oldcuid, oldcgid, err := oldInfo.idMappings.ToContainer(idtools.IDPair{UID: int(oldUID), GID: int(oldGID)}); err == nil {
oldUID = uint32(oldcuid)
oldGID = uint32(oldcgid)
}
}
}
ownerChanged := uid != oldUID || gid != oldGID
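The statDifferent fix only adds a nil guard: the old FileInfo may be absent (for example for a newly added path), in which case there is nothing to map back into container IDs. For reference, a small sketch of the idtools mapping the comparison relies on (the 100000/65536 mapping values are example numbers):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// A single user-namespace mapping: host 100000-165535 -> container 0-65535.
	maps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	mappings := idtools.NewIDMappingsFromMaps(maps, maps)

	// statDifferent maps both the new and (when present) the old owner into
	// container IDs before comparing; the fix above simply skips the old
	// mapping when there is no old FileInfo to map.
	uid, gid, err := mappings.ToContainer(idtools.IDPair{UID: 100001, GID: 100001})
	fmt.Println(uid, gid, err) // 1 1 <nil>
}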

View File

@@ -37,6 +37,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
aufsTempdir := ""
aufsHardlinks := make(map[string]*tar.Header)
buffer := make([]byte, 1<<20)
// Iterate through the files in the archive.
for {
@@ -105,7 +106,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
}
defer os.RemoveAll(aufsTempdir)
}
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors); err != nil {
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, buffer); err != nil {
return 0, err
}
}
@@ -196,7 +197,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
return 0, err
}
if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors); err != nil {
if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, buffer); err != nil {
return 0, err
}

View File

@@ -39,6 +39,10 @@ type ThinpoolOptionsConfig struct {
// log_level sets the log level of devicemapper.
LogLevel string `toml:"log_level"`
// MetadataSize specifies the size of the metadata for the thinpool.
// It will be used with the `pvcreate --metadatasize` option.
MetadataSize string `toml:"metadatasize"`
// MinFreeSpace specifies the min free space percent in a thin pool
// required for new device creation to succeed.
MinFreeSpace string `toml:"min_free_space"`
@@ -218,6 +222,9 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
if options.Thinpool.LogLevel != "" {
doptions = append(doptions, fmt.Sprintf("dm.libdm_log_level=%s", options.Thinpool.LogLevel))
}
if options.Thinpool.MetadataSize != "" {
doptions = append(doptions, fmt.Sprintf("dm.metadata_size=%s", options.Thinpool.MetadataSize))
}
if options.Thinpool.MinFreeSpace != "" {
doptions = append(doptions, fmt.Sprintf("dm.min_free_space=%s", options.Thinpool.MinFreeSpace))
}
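For orientation, a pared-down sketch of how the new metadatasize TOML field ends up as a dm.metadata_size graphdriver option string (the thinpoolConfig struct and the "1G"/"10%" values are illustrative, not taken from the diff):

package main

import "fmt"

// thinpoolConfig is a stand-in for ThinpoolOptionsConfig above.
type thinpoolConfig struct {
	MetadataSize string
	MinFreeSpace string
}

// graphDriverOptions mirrors how GetGraphDriverOptions turns the TOML fields
// into "dm.*" option strings handed to the devicemapper driver.
func graphDriverOptions(c thinpoolConfig) []string {
	var opts []string
	if c.MetadataSize != "" {
		opts = append(opts, fmt.Sprintf("dm.metadata_size=%s", c.MetadataSize))
	}
	if c.MinFreeSpace != "" {
		opts = append(opts, fmt.Sprintf("dm.min_free_space=%s", c.MinFreeSpace))
	}
	return opts
}

func main() {
	fmt.Println(graphDriverOptions(thinpoolConfig{MetadataSize: "1G", MinFreeSpace: "10%"}))
	// [dm.metadata_size=1G dm.min_free_space=10%]
}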

View File

@@ -262,7 +262,7 @@ func (p *Pattern) compile() error {
}
}
regStr += "(/.*)?$"
regStr += "(" + escSL + ".*)?$"
re, err := regexp.Compile(regStr)
if err != nil {
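escSL is defined outside the hunk shown; assuming it is the regexp-escaped platform path separator, the change makes the trailing "match the directory or anything under it" group work with "\" on Windows as well as with "/". A sketch of that idea:

package main

import (
	"fmt"
	"os"
	"regexp"
)

func main() {
	// QuoteMeta matters on Windows, where the separator "\" is a regexp
	// metacharacter.
	escSL := regexp.QuoteMeta(string(os.PathSeparator))

	// "(<sep>.*)?$" matches the directory itself and anything under it,
	// using the platform separator instead of a hard-coded "/".
	re := regexp.MustCompile("^foo(" + escSL + ".*)?$")
	fmt.Println(re.MatchString("foo"))                                    // true
	fmt.Println(re.MatchString("foo" + string(os.PathSeparator) + "bar")) // true
	fmt.Println(re.MatchString("foobar"))                                 // false
}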

View File

@@ -35,9 +35,9 @@ type lockfile struct {
// necessary.
func openLock(path string, ro bool) (int, error) {
if ro {
return unix.Open(path, os.O_RDONLY, 0)
return unix.Open(path, os.O_RDONLY|unix.O_CLOEXEC, 0)
}
return unix.Open(path, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
return unix.Open(path, os.O_RDWR|unix.O_CLOEXEC|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR|unix.S_IRGRP|unix.S_IROTH)
}
// createLockerForPath returns a Locker object, possibly (depending on the platform)
@@ -106,7 +106,6 @@ func (l *lockfile) lock(lType int16, recursive bool) {
if err != nil {
panic(fmt.Sprintf("error opening %q: %v", l.file, err))
}
unix.CloseOnExec(fd)
l.fd = uintptr(fd)
// Optimization: only use the (expensive) fcntl syscall when
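Opening the lock file with O_CLOEXEC sets close-on-exec atomically with the open, which is why the later unix.CloseOnExec(fd) call can be dropped: there is no longer a window in which a forked-and-exec'd child could inherit the descriptor. A sketch of the open call (the /tmp/example.lock path is illustrative):

//go:build linux

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Read/write lock file, created if missing, close-on-exec from the start,
	// and readable by group/other as in the new openLock.
	fd, err := unix.Open("/tmp/example.lock",
		os.O_RDWR|os.O_CREATE|unix.O_CLOEXEC,
		unix.S_IRUSR|unix.S_IWUSR|unix.S_IRGRP|unix.S_IROTH)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)
	fmt.Println("lock fd:", fd)
}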

View File

@@ -4,8 +4,6 @@ import (
"sort"
"strconv"
"strings"
"github.com/containers/storage/pkg/fileutils"
)
// mountError holds an error from a mount or unmount operation
@@ -43,33 +41,6 @@ func (e *mountError) Cause() error {
return e.err
}
// GetMounts retrieves a list of mounts for the current running process.
func GetMounts() ([]*Info, error) {
return parseMountTable()
}
// Mounted determines if a specified mountpoint has been mounted.
// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab.
func Mounted(mountpoint string) (bool, error) {
entries, err := parseMountTable()
if err != nil {
return false, err
}
mountpoint, err = fileutils.ReadSymlinkedPath(mountpoint)
if err != nil {
return false, err
}
// Search the table for the mountpoint
for _, e := range entries {
if e.Mountpoint == mountpoint {
return true, nil
}
}
return false, nil
}
// Mount will mount filesystem according to the specified configuration, on the
// condition that the target path is *not* already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See

View File

@@ -1,40 +1,21 @@
package mount
// Info reveals information about a particular mounted filesystem. This
// struct is populated from the content in the /proc/<pid>/mountinfo file.
type Info struct {
// ID is a unique identifier of the mount (may be reused after umount).
ID int
import (
"github.com/containers/storage/pkg/fileutils"
"github.com/moby/sys/mountinfo"
)
// Parent indicates the ID of the mount parent (or of self for the top of the
// mount tree).
Parent int
type Info = mountinfo.Info
// Major indicates one half of the device ID which identifies the device class.
Major int
// Minor indicates one half of the device ID which identifies a specific
// instance of device.
Minor int
// Root of the mount within the filesystem.
Root string
// Mountpoint indicates the mount point relative to the process's root.
Mountpoint string
// Opts represents mount-specific options.
Opts string
// Optional represents optional fields.
Optional string
// Fstype indicates the type of filesystem, such as EXT3.
Fstype string
// Source indicates filesystem specific information or "none".
Source string
// VfsOpts represents per super block options.
VfsOpts string
func GetMounts() ([]*Info, error) {
return mountinfo.GetMounts(nil)
}
// Mounted determines if a specified mountpoint has been mounted.
func Mounted(mountpoint string) (bool, error) {
mountpoint, err := fileutils.ReadSymlinkedPath(mountpoint)
if err != nil {
return false, err
}
return mountinfo.Mounted(mountpoint)
}
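After the rewrite, pkg/mount keeps its small public surface but delegates the parsing to github.com/moby/sys/mountinfo: Info becomes a type alias, GetMounts forwards to mountinfo.GetMounts(nil), and Mounted resolves symlinks before asking the new library. A usage sketch of that surface (the /proc path is just a convenient example):

//go:build linux

package main

import (
	"fmt"
	"log"

	"github.com/containers/storage/pkg/mount"
)

func main() {
	// GetMounts now delegates to mountinfo.GetMounts(nil); Info is a type
	// alias for mountinfo.Info, so existing callers keep compiling unchanged.
	infos, err := mount.GetMounts()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("mount entries:", len(infos))

	// Mounted resolves symlinks first (fileutils.ReadSymlinkedPath), then asks
	// moby/sys/mountinfo whether the path is a mount point.
	ok, err := mount.Mounted("/proc")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("/proc mounted:", ok)
}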

View File

@@ -1,120 +1,5 @@
package mount
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
import "github.com/moby/sys/mountinfo"
"github.com/pkg/errors"
)
// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
// bind mounts
func parseMountTable() ([]*Info, error) {
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return nil, err
}
defer f.Close()
return parseInfoFile(f)
}
func parseInfoFile(r io.Reader) ([]*Info, error) {
s := bufio.NewScanner(r)
out := []*Info{}
for s.Scan() {
/*
36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
(0)(1)(2) (3) (4) (5) (6) (7) (8) (9) (10)
(0) mount ID: unique identifier of the mount (may be reused after umount)
(1) parent ID: ID of parent (or of self for the top of the mount tree)
(2) major:minor: value of st_dev for files on filesystem
(3) root: root of the mount within the filesystem
(4) mount point: mount point relative to the process's root
(5) mount options: per mount options
(6) optional fields: zero or more fields of the form "tag[:value]"
(7) separator: marks the end of the optional fields
(8) filesystem type: name of filesystem of the form "type[.subtype]"
(9) mount source: filesystem specific information or "none"
(10) super options: per super block options
*/
text := s.Text()
fields := strings.Split(text, " ")
numFields := len(fields)
if numFields < 10 {
// should be at least 10 fields
return nil, errors.Errorf("Parsing %q failed: not enough fields (%d)", text, numFields)
}
p := &Info{}
// ignore any number parsing errors, there should not be any
p.ID, _ = strconv.Atoi(fields[0])
p.Parent, _ = strconv.Atoi(fields[1])
mm := strings.Split(fields[2], ":")
if len(mm) != 2 {
return nil, fmt.Errorf("Parsing %q failed: unexpected minor:major pair %s", text, mm)
}
p.Major, _ = strconv.Atoi(mm[0])
p.Minor, _ = strconv.Atoi(mm[1])
p.Root = fields[3]
p.Mountpoint = fields[4]
p.Opts = fields[5]
// one or more optional fields, when a separator (-)
i := 6
for ; i < numFields && fields[i] != "-"; i++ {
switch i {
case 6:
p.Optional = string(fields[6])
default:
/* NOTE there might be more optional fields before the separator,
such as fields[7] or fields[8], although as of Linux kernel 5.5
the only known ones are mount propagation flags in fields[6].
The correct behavior is to ignore any unknown optional fields.
*/
}
}
if i == numFields {
return nil, fmt.Errorf("Parsing %q failed: missing - separator", text)
}
// There should be 3 fields after the separator...
if i+4 > numFields {
return nil, fmt.Errorf("Parsing %q failed: not enough fields after a - separator", text)
}
// ... but in Linux <= 3.9 mounting a cifs with spaces in a share name
// (like "//serv/My Documents") _may_ end up having a space in the last field
// of mountinfo (like "unc=//serv/My Documents"). Since kernel 3.10-rc1, cifs
// option unc= is ignored, so a space should not appear. In here we ignore
// those "extra" fields caused by extra spaces.
p.Fstype = fields[i+1]
p.Source = fields[i+2]
p.VfsOpts = fields[i+3]
out = append(out, p)
}
if err := s.Err(); err != nil {
return nil, err
}
return out, nil
}
// PidMountInfo collects the mounts for a specific process ID. If the process
// ID is unknown, it is better to use `GetMounts` which will inspect
// "/proc/self/mountinfo" instead.
func PidMountInfo(pid int) ([]*Info, error) {
f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
if err != nil {
return nil, err
}
defer f.Close()
return parseInfoFile(f)
}
var PidMountInfo = mountinfo.PidMountInfo
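The hand-written /proc/self/mountinfo parser above is dropped in favour of github.com/moby/sys/mountinfo, and PidMountInfo becomes a direct re-export. Beyond GetMounts(nil), the library accepts filter functions; the sketch below assumes mountinfo.PrefixFilter is available (as in the library versions I am aware of) to list only mounts under a prefix:

//go:build linux

package main

import (
	"fmt"
	"log"

	"github.com/moby/sys/mountinfo"
)

func main() {
	// Passing a FilterFunc avoids keeping every entry of /proc/self/mountinfo;
	// PrefixFilter keeps only mounts below the given prefix.
	infos, err := mountinfo.GetMounts(mountinfo.PrefixFilter("/var/lib/containers"))
	if err != nil {
		log.Fatal(err)
	}
	for _, info := range infos {
		fmt.Println(info.Mountpoint, info.Source)
	}
}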

Some files were not shown because too many files have changed in this diff.