Mirror of https://github.com/containers/skopeo.git (synced 2026-01-31 06:19:20 +00:00)

Compare commits: 448 commits
Commits in range: 1bbd87f435 (first listed) through 864568bbd9 (last listed). Only abbreviated commit SHA1s were captured in this mirror view; the author, date, and message columns were empty.
**.gitignore** (vendored), 4 lines changed:

```diff
@@ -1,3 +1,3 @@
-/skopeo
-/skopeo.1
+/docs/skopeo.1
 /layers-*
+/skopeo
```
**CONTRIBUTING.md** (new file, 142 lines):

# Contributing to Skopeo

We'd love to have you join the community! Below summarizes the processes
that we follow.

## Topics

* [Reporting Issues](#reporting-issues)
* [Submitting Pull Requests](#submitting-pull-requests)
* [Communications](#communications)
* [Becoming a Maintainer](#becoming-a-maintainer)

## Reporting Issues

Before reporting an issue, check our backlog of
[open issues](https://github.com/projectatomic/skopeo/issues)
to see if someone else has already reported it. If so, feel free to add
your scenario, or additional information, to the discussion. Or simply
"subscribe" to it to be notified when it is updated.

If you find a new issue with the project we'd love to hear about it! The most
important aspect of a bug report is that it includes enough information for
us to reproduce it. So, please include as much detail as possible and try
to remove the extra stuff that doesn't really relate to the issue itself.
The easier it is for us to reproduce it, the faster it'll be fixed!

Please don't include any private/sensitive information in your issue!

## Submitting Pull Requests

No Pull Request (PR) is too small! Typos, additional comments in the code,
new test cases, bug fixes, new features, more documentation, ... it's all
welcome!

While bug fixes can first be identified via an "issue", that is not required.
It's ok to just open up a PR with the fix, but make sure you include the same
information you would have included in an issue - like how to reproduce it.

PRs for new features should include some background on what use cases the
new code is trying to address. And, when possible and it makes sense, try to
break up larger PRs into smaller ones - it's easier to review smaller
code changes. But, only if those smaller ones make sense as stand-alone PRs.

Regardless of the type of PR, all PRs should include:
* well-documented code changes
* additional test cases. Ideally, they should fail without your code change applied
* documentation changes

Squash your commits into logical pieces of work that might want to be reviewed
separately from the rest of the PR. But, squashing down to just one commit is ok
too since in the end the entire PR will be reviewed anyway. When in doubt,
squash.

PRs that fix issues should include a reference like `Closes #XXXX` in the
commit message so that GitHub will automatically close the referenced issue
when the PR is merged.

<!--
All PRs require at least two LGTMs (Looks Good To Me) from maintainers.
-->

### Sign your PRs

The sign-off is a line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

then you just add a line to every git commit message:

    Signed-off-by: Joe Smith <joe.smith@email.com>

Use your real name (sorry, no pseudonyms or anonymous contributions).

If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s` (see the sketch after this file).

## Communications

For general questions or discussions, please use the IRC channel
`container-projects` that has been set up on `irc.freenode.net`.

For discussions around issues/bugs and features, you can use the GitHub
[issues](https://github.com/projectatomic/skopeo/issues)
and
[PRs](https://github.com/projectatomic/skopeo/pulls)
tracking system.

<!--
## Becoming a Maintainer

To become a maintainer you must first be nominated by an existing maintainer.
If a majority (>50%) of maintainers agree then the proposal is adopted and
you will be added to the list.

Removing a maintainer requires at least 75% of the remaining maintainers'
approval, or if the person requests to be removed then it is automatic.
Normally, a maintainer will only be removed if they are considered to be
inactive for a long period of time or are viewed as disruptive to the community.

The current list of maintainers can be found in the
[MAINTAINERS](MAINTAINERS) file.
-->
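A minimal sketch of the sign-off workflow described above; the identity and commit message are placeholders:

```sh
# One-time setup: tell git who you are (placeholder identity).
git config --global user.name "Joe Smith"
git config --global user.email "joe.smith@email.com"

# -s appends a Signed-off-by trailer built from user.name/user.email.
git commit -s -m "copy: fix manifest handling"

# Confirm the trailer landed in the commit message.
git log -1 --format=%B
```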
**Dockerfile**, 39 lines changed:

```diff
@@ -1,22 +1,18 @@
 FROM fedora

 RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-go-md2man \
 	# storage deps
 	btrfs-progs-devel \
 	device-mapper-devel \
 	# gpgme bindings deps
 	libassuan-devel gpgme-devel \
-	gnupg \
-	# registry v1 deps
-	xz-devel \
-	python-devel \
-	python-pip \
-	swig \
-	redhat-rpm-config \
-	openssl-devel \
-	patch
+	ostree-devel \
+	gnupg

-# Install three versions of the registry. The first is an older version that
+# Install two versions of the registry. The first is an older version that
 # only supports schema1 manifests. The second is a newer version that supports
-# both. This allows integration-cli tests to cover push/pull with both schema1
-# and schema2 manifests. Install registry v1 also.
+# both. This allows integration-cli tests to cover push/pull with both schema1
+# and schema2 manifests.
 ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
 ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
 RUN set -x \
@@ -28,17 +24,16 @@ RUN set -x \
 	&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
 	&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
 		go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
-	&& rm -rf "$GOPATH" \
-	&& export DRV1="$(mktemp -d)" \
-	&& git clone https://github.com/docker/docker-registry.git "$DRV1" \
-	# no need for setuptools since we have a version conflict with fedora
-	&& sed -i.bak s/setuptools==5.8//g "$DRV1/requirements/main.txt" \
-	&& sed -i.bak s/setuptools==5.8//g "$DRV1/depends/docker-registry-core/requirements/main.txt" \
-	&& pip install "$DRV1/depends/docker-registry-core" \
-	&& pip install file://"$DRV1#egg=docker-registry[bugsnag,newrelic,cors]" \
-	&& patch $(python -c 'import boto; import os; print os.path.dirname(boto.__file__)')/connection.py \
-		< "$DRV1/contrib/boto_header_patch.diff" \
-	&& dnf -y update && dnf install -y m2crypto
+	&& rm -rf "$GOPATH"

 RUN set -x \
 	&& yum install -y which git tar wget hostname util-linux bsdtar socat ethtool device-mapper iptables tree findutils nmap-ncat e2fsprogs xfsprogs lsof docker iproute \
 	&& export GOPATH=$(mktemp -d) \
 	&& git clone -b v1.5.0-alpha.3 git://github.com/openshift/origin "$GOPATH/src/github.com/openshift/origin" \
 	&& (cd "$GOPATH/src/github.com/openshift/origin" && make clean build && make all WHAT=cmd/dockerregistry) \
 	&& cp -a "$GOPATH/src/github.com/openshift/origin/_output/local/bin/linux"/*/* /usr/local/bin \
 	&& cp "$GOPATH/src/github.com/openshift/origin/images/dockerregistry/config.yml" /atomic-registry-config.yml \
 	&& mkdir /registry

 ENV GOPATH /usr/share/gocode:/go
 ENV PATH $GOPATH/bin:/usr/share/gocode/bin:$PATH
```
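This dev/test image backs the `build-container` target in the Makefile later in this diff; per that Makefile, the targets that consume it are:

```sh
# Build the skopeo-dev image from this Dockerfile.
$ make build-container

# Run the integration suite, or get an interactive shell, inside it.
$ make test-integration
$ make shell
```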
**Dockerfile.build** (new file, 14 lines):

```dockerfile
FROM ubuntu:17.04

RUN apt-get update && apt-get install -y \
    golang \
    btrfs-tools \
    git-core \
    libdevmapper-dev \
    libgpgme11-dev \
    go-md2man \
    libglib2.0-dev \
    libostree-dev

ENV GOPATH=/
WORKDIR /src/github.com/projectatomic/skopeo
```
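Dockerfile.build produces the throwaway build image used by the Makefile's `binary:` target (shown later in this diff); invoked by hand, that flow looks roughly like this:

```sh
# Build the throwaway build image from Dockerfile.build.
$ docker build -f Dockerfile.build -t skopeobuildimage .

# Mount the source tree at the GOPATH location the image expects
# and run the in-container build target.
$ docker run --rm --security-opt label:disable \
    -v "$(pwd)":/src/github.com/projectatomic/skopeo \
    skopeobuildimage make binary-local
```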
**LICENSE**, 468 lines changed: the project relicensed from the GNU General Public License, version 2 (339 lines removed) to the Apache License, version 2.0 (189 lines added). Both are the standard, unmodified license texts; the new file ends with the usual Apache notice pointing to https://www.apache.org/licenses/LICENSE-2.0.
**Makefile**, 80 lines changed:

```diff
@@ -1,12 +1,19 @@
-.PHONY: all binary build clean install install-binary shell test-integration
+.PHONY: all binary build-container build-local clean install install-binary install-completions shell test-integration

 export GO15VENDOREXPERIMENT=1

 PREFIX ?= ${DESTDIR}/usr
 INSTALLDIR=${PREFIX}/bin
 MANINSTALLDIR=${PREFIX}/share/man
-# TODO(runcom)
-#BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
+CONTAINERSSYSCONFIGDIR=${DESTDIR}/etc/containers
+REGISTRIESDDIR=${CONTAINERSSYSCONFIGDIR}/registries.d
+SIGSTOREDIR=${DESTDIR}/var/lib/atomic/sigstore
+BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
+GO_MD2MAN ?= go-md2man
+
+ifeq ($(DEBUG), 1)
+  override GOGCFLAGS += -N -l
+endif

 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
 DOCKER_IMAGE := skopeo-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH))
@@ -23,26 +30,63 @@ DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)"

 GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)

-all: binary
+MANPAGES_MD = $(wildcard docs/*.md)

-binary:
-	go build -ldflags "-X main.gitCommit=${GIT_COMMIT}" -o ${DEST}skopeo ./cmd/skopeo
+BTRFS_BUILD_TAG = $(shell hack/btrfs_tag.sh)
+LIBDM_BUILD_TAG = $(shell hack/libdm_tag.sh)
+LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG)
+BUILDTAGS += $(LOCAL_BUILD_TAGS)
+
+# make all DEBUG=1
+#  Note: Uses the -N -l go compiler options to disable compiler optimizations
+#        and inlining. Using these build options allows you to subsequently
+#        use source debugging tools like delve.
+all: binary docs
+
+# Build a docker image (skopeobuild) that has everything we need to build.
+# Then do the build and the output (skopeo) should appear in current dir
+binary: cmd/skopeo
+	docker build ${DOCKER_BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
+	docker run --rm --security-opt label:disable -v $$(pwd):/src/github.com/projectatomic/skopeo \
+		skopeobuildimage make binary-local $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
+
+binary-static: cmd/skopeo
+	docker build ${DOCKER_BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
+	docker run --rm --security-opt label:disable -v $$(pwd):/src/github.com/projectatomic/skopeo \
+		skopeobuildimage make binary-local-static $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
+
+# Build w/o using Docker containers
+binary-local:
+	go build -ldflags "-X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo
+
+binary-local-static:
+	go build -ldflags "-extldflags \"-static\" -X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo

 build-container:
 	docker build ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" .

+docs/%.1: docs/%.1.md
+	$(GO_MD2MAN) -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@
+
+.PHONY: docs
+docs: $(MANPAGES_MD:%.md=%)
+
 clean:
-	rm -f skopeo
+	rm -f skopeo docs/*.1

-install: install-binary
-	install -m 644 man1/skopeo.1 ${MANINSTALLDIR}/man1/
-	# TODO(runcom)
-	#install -m 644 completion/bash/skopeo ${BASHINSTALLDIR}/
+install: install-binary install-docs install-completions
+	install -d -m 755 ${SIGSTOREDIR}
+	install -D -m 644 default-policy.json ${CONTAINERSSYSCONFIGDIR}/policy.json
+	install -D -m 644 default.yaml ${REGISTRIESDDIR}/default.yaml

-install-binary:
-	install -d -m 0755 ${INSTALLDIR}
-	install -m 755 skopeo ${INSTALLDIR}
+install-binary: ./skopeo
+	install -D -m 755 skopeo ${INSTALLDIR}/skopeo
+
+install-docs: docs/skopeo.1
+	install -D -m 644 docs/skopeo.1 ${MANINSTALLDIR}/man1/skopeo.1
+
+install-completions:
+	install -m 644 -D completions/bash/skopeo ${BASHINSTALLDIR}/skopeo

 shell: build-container
 	$(DOCKER_RUN_DOCKER) bash
@@ -51,20 +95,20 @@ check: validate test-unit test-integration

 # The tests can run out of entropy and block in containers, so replace /dev/random.
 test-integration: build-container
-	$(DOCKER_RUN_DOCKER) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; hack/make.sh test-integration'
+	$(DOCKER_RUN_DOCKER) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-integration'

 test-unit: build-container
 	# Just call (make test unit-local) here instead of worrying about environment differences, e.g. GO15VENDOREXPERIMENT.
-	$(DOCKER_RUN_DOCKER) make test-unit-local
+	$(DOCKER_RUN_DOCKER) make test-unit-local BUILDTAGS='$(BUILDTAGS)'

 validate: build-container
 	$(DOCKER_RUN_DOCKER) hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet

 # This target is only intended for development, e.g. executing it from an IDE. Use (make test) for CI or pre-release testing.
 test-all-local: validate-local test-unit-local

 validate-local:
 	hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet

 test-unit-local:
-	go test $$(go list -e ./... | grep -v '^github\.com/projectatomic/skopeo/\(integration\|vendor/.*\)$$')
+	go test -tags "$(BUILDTAGS)" $$(go list -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/projectatomic/skopeo/\(integration\|vendor/.*\)$$')
```
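The `DEBUG=1` note in the Makefile above disables compiler optimizations and inlining so the binary stays debugger-friendly; a sketch of that workflow, assuming the delve debugger is installed as `dlv` (the image reference is illustrative):

```sh
# Build an unoptimized binary (-N -l is injected via GOGCFLAGS).
$ make binary-local DEBUG=1

# Step through a skopeo invocation under delve.
$ dlv exec ./skopeo -- inspect docker://docker.io/library/busybox:latest
```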
195
README.md
195
README.md
@@ -1,59 +1,75 @@
skopeo [![Build Status](https://travis-ci.org/projectatomic/skopeo.svg?branch=master)](https://travis-ci.org/projectatomic/skopeo)
=

_Please be aware `skopeo` is still work in progress_
_Please be aware `skopeo` is still work in progress and it currently supports only registry API V2_

`skopeo` is a command line utility which is able to _inspect_ a repository on a Docker registry and fetch image layers.
`skopeo` is a command line utility for various operations on container images and image repositories.

Inspecting a repository
-
`skopeo` is able to _inspect_ a repository on a Docker registry and fetch image layers.
_Inspecting_ means fetching the repository's manifest and showing a `docker inspect`-like
JSON output about a whole repository or a tag. In contrast to `docker inspect`, this tool helps you gather useful information about
a repository or a tag before pulling it (and before using disk space): e.g. which tags are available for the given repository? which labels does the image have?

Examples:
```sh
# show repository's labels of rhel7:latest
$ skopeo inspect docker://registry.access.redhat.com/rhel7 | jq '.Config.Labels'
# show properties of fedora:latest
$ skopeo inspect docker://docker.io/fedora
{
    "Architecture": "x86_64",
    "Authoritative_Registry": "registry.access.redhat.com",
    "BZComponent": "rhel-server-docker",
    "Build_Host": "rcm-img04.build.eng.bos.redhat.com",
    "Name": "rhel7/rhel",
    "Release": "38",
    "Vendor": "Red Hat, Inc.",
    "Version": "7.2"
    "Name": "docker.io/library/fedora",
    "Tag": "latest",
    "Digest": "sha256:cfd8f071bf8da7a466748f522406f7ae5908d002af1b1a1c0dcf893e183e5b32",
    "RepoTags": [
        "20",
        "21",
        "22",
        "23",
        "heisenbug",
        "latest",
        "rawhide"
    ],
    "Created": "2016-03-04T18:40:02.92155334Z",
    "DockerVersion": "1.9.1",
    "Labels": {},
    "Architecture": "amd64",
    "Os": "linux",
    "Layers": [
        "sha256:236608c7b546e2f4e7223526c74fc71470ba06d46ec82aeb402e704bfdee02a2",
        "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
    ]
}

# show repository's tags
$ skopeo inspect docker://docker.io/fedora | jq '.RepoTags'
[
    "20",
    "21",
    "22",
    "23",
    "heisenbug",
    "latest",
    "rawhide"
]
# show image's digest
# show unverified image's digest
$ skopeo inspect docker://docker.io/fedora:rawhide | jq '.Digest'
"sha256:905b4846938c8aef94f52f3e41a11398ae5b40f5855fb0e40ed9c157e721d7f8"
```

# show image's label "Name"
$ skopeo inspect docker://registry.access.redhat.com/rhel7 | jq '.Config.Labels.Name'
"rhel7/rhel"
Copying images
-
`skopeo` can copy container images between various storage mechanisms,
e.g. Docker registries (including the Docker Hub), the Atomic Registry,
local directories, and local OCI-layout directories:

```sh
$ skopeo copy docker://busybox:1-glibc atomic:myns/unsigned:streaming
$ skopeo copy docker://busybox:latest dir:existingemptydirectory
$ skopeo copy docker://busybox:latest oci:busybox_ocilayout
```

Deleting images
-
For example,
```sh
$ skopeo delete docker://localhost:5000/imagename:latest
```

Private registries with authentication
-
When interacting with private registries, `skopeo` first looks for Docker's CLI config file (usually located at `$HOME/.docker/config.json`) to get the credentials needed to authenticate. When the file isn't available it falls back to looking for `--username` and `--password` flags. The ultimate fallback, as Docker does, is to provide an empty authentication when interacting with those registries.
When interacting with private registries, `skopeo` first looks for `--creds` (for `skopeo inspect|delete`) or `--src-creds|--dest-creds` (for `skopeo copy`) flags. If those aren't provided, it looks for Docker's CLI config file (usually located at `$HOME/.docker/config.json`) to get the credentials needed to authenticate. The ultimate fallback, as Docker does, is to provide an empty authentication when interacting with those registries.

Examples:
```sh
# on my system
$ skopeo --help | grep docker-cfg
--docker-cfg "/home/runcom/.docker"  Docker's cli config for auth

$ cat /home/runcom/.docker/config.json
{
    "auths": {
@@ -69,68 +85,121 @@ $ skopeo inspect docker://myregistrydomain.com:5000/busybox
{"Tag":"latest","Digest":"sha256:473bb2189d7b913ed7187a33d11e743fdc2f88931122a44d91a301b64419f092","RepoTags":["latest"],"Comment":"","Created":"2016-01-15T18:06:41.282540103Z","ContainerConfig":{"Hostname":"aded96b43f48","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"9e77fef7a1c9f989988c06620dabc4020c607885b959a2cbd7c2283c91da3e33","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"DockerVersion":"1.8.3","Author":"","Config":{"Hostname":"aded96b43f48","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"9e77fef7a1c9f989988c06620dabc4020c607885b959a2cbd7c2283c91da3e33","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"Architecture":"amd64","Os":"linux"}

# let's now try to fake a non-existent Docker config file
$ skopeo --docker-cfg="" inspect docker://myregistrydomain.com:5000/busybox
FATA[0000] Get https://myregistrydomain.com:5000/v2/busybox/manifests/latest: no basic auth credentials
$ cat /home/runcom/.docker/config.json
{}

# passing --username and --password - we can see that everything goes fine
$ skopeo --docker-cfg="" --username=testuser --password=testpassword inspect docker://myregistrydomain.com:5000/busybox
$ skopeo inspect docker://myregistrydomain.com:5000/busybox
FATA[0000] unauthorized: authentication required

# passing --creds - we can see that everything goes fine
$ skopeo inspect --creds=testuser:testpassword docker://myregistrydomain.com:5000/busybox
{"Tag":"latest","Digest":"sha256:473bb2189d7b913ed7187a33d11e743fdc2f88931122a44d91a301b64419f092","RepoTags":["latest"],"Comment":"","Created":"2016-01-15T18:06:41.282540103Z","ContainerConfig":{"Hostname":"aded96b43f48","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"9e77fef7a1c9f989988c06620dabc4020c607885b959a2cbd7c2283c91da3e33","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"DockerVersion":"1.8.3","Author":"","Config":{"Hostname":"aded96b43f48","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"9e77fef7a1c9f989988c06620dabc4020c607885b959a2cbd7c2283c91da3e33","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"Architecture":"amd64","Os":"linux"}

# skopeo copy example:
$ skopeo copy --src-creds=testuser:testpassword docker://myregistrydomain.com:5000/private oci:local_oci_image
```
If your CLI config is found but it doesn't contain the necessary credentials for the queried registry
you'll get an error. You can fix this by either logging in (via `docker login`) or providing `--username`
and `--password`.
you'll get an error. You can fix this by either logging in (via `docker login`) or providing `--creds` or `--src-creds|--dest-creds`.

Building
-
To build `skopeo` you need at least Go 1.5 because it uses the latest `GO15VENDOREXPERIMENT` flag. Also, make sure to clone the repository in your `GOPATH` - otherwise compilation fails.
To build the `skopeo` binary you need at least Go 1.5 because it uses the latest `GO15VENDOREXPERIMENT` flag.
There are two ways to build skopeo: in a container, or locally without a container. Choose the one which better matches your needs and environment.

### Building without a container
Building without a container requires a bit more manual work and setup in your environment, but it is more flexible:
- It should work in more environments (e.g. for native macOS builds)
- It does not require root privileges (after dependencies are installed)
- It is faster, and therefore more convenient for developing `skopeo`.

Install the necessary dependencies:
```sh
$ cd $GOPATH/src
$ mkdir -p github.com/projectatomic
$ cd projectatomic
$ git clone https://github.com/projectatomic/skopeo
$ cd skopeo && make binary
Fedora$ sudo dnf install gpgme-devel libassuan-devel btrfs-progs-devel device-mapper-devel
macOS$ brew install gpgme
```

You may need to install additional development packages: gpgme-devel and libassuan-devel
Make sure to clone this repository in your `GOPATH` - otherwise compilation fails.

```sh
# dnf install gpgme-devel libassuan-devel
$ git clone https://github.com/projectatomic/skopeo $GOPATH/src/github.com/projectatomic/skopeo
$ cd $GOPATH/src/github.com/projectatomic/skopeo && make binary-local
```

Man:
-
To build the man page you need [`go-md2man`](https://github.com/cpuguy83/go-md2man) available on your system, then:
### Building in a container
Building in a container is simpler, but more restrictive:
- It requires the `docker` command and the ability to run Linux containers
- The created executable is a Linux executable, and depends on dynamic libraries which may be available only in a container of a similar Linux distribution.

```sh
$ make binary # Or (make all) to also build documentation, see below.
```
$ make man

### Building documentation
To build the manual you will need go-md2man.
```sh
Debian$ sudo apt-get install go-md2man
Fedora$ sudo dnf install go-md2man
```
Then
```sh
$ make docs
```

Installing
-
If you built from source:
```sh
$ sudo make install
```
`skopeo` is also available from Fedora 23:
`skopeo` is also available from Fedora 23 (and later):
```sh
sudo dnf install skopeo
```
Tests
-
_You need Docker installed on your system in order to run the test suite_
```sh
$ make test-integration
$ sudo dnf install skopeo
```
TODO
-
- update README with `layers` command
- list all images on registry?
- registry v2 search?
- download layers in parallel and support docker load tar(s)
- show repo tags via flag or when reference isn't tagged or digested
- add tests (integration with deployed registries in container - Docker-like)
- support rkt/appc image spec

NOT TODO
-
- provide a _format_ flag - just use the awesome [jq](https://stedolan.github.io/jq/)

CONTRIBUTING
-

### Dependency management

`skopeo` uses [`vndr`](https://github.com/LK4D4/vndr) for dependency management.

In order to add a new dependency to this project:

- add a new line to `vendor.conf` according to `vndr` rules (e.g. `github.com/pkg/errors master`)
- run `vndr github.com/pkg/errors`

In order to update an existing dependency:

- update the relevant dependency line in `vendor.conf`
- run `vndr github.com/pkg/errors`

When new PRs for [containers/image](https://github.com/containers/image) break `skopeo` (i.e. `containers/image` tests fail in `make test-skopeo`):

- create a new branch in your `skopeo` checkout and switch to it
- update `vendor.conf`. Find the `containers/image` dependency; update it to vendor from your own branch and your own repository fork (e.g. `github.com/containers/image my-branch https://github.com/runcom/image`)
- run `vndr github.com/containers/image`
- make any other necessary changes in the skopeo repo (e.g. add other dependencies now required by `containers/image`, or update skopeo for changed `containers/image` API)
- optionally add new integration tests to the skopeo repo
- submit the resulting branch as a skopeo PR, marked “DO NOT MERGE”
- iterate until tests pass and the PR is reviewed
- then the original `containers/image` PR can be merged, disregarding its `make test-skopeo` failure
- as soon as possible after that, in the skopeo PR, restore the `containers/image` line in `vendor.conf` to use `containers/image:master`
- run `vndr github.com/containers/image`
- update the skopeo PR with the result, drop the “DO NOT MERGE” marking
- after tests complete successfully again, merge the skopeo PR

License
-
GPLv2
skopeo is licensed under the Apache License, Version 2.0. See
[LICENSE](LICENSE) for the full license text.
cmd/skopeo/copy.go
@@ -1,120 +1,125 @@
package main

import (
    "encoding/json"
    "errors"
    "fmt"
    "os"
    "strings"

    "github.com/Sirupsen/logrus"
    "github.com/codegangsta/cli"
    "github.com/projectatomic/skopeo/docker/utils"
    "github.com/projectatomic/skopeo/signature"
    "github.com/containers/image/copy"
    "github.com/containers/image/transports"
    "github.com/containers/image/transports/alltransports"
    "github.com/containers/image/types"
    "github.com/urfave/cli"
)

// FIXME: Also handle schema2, and put this elsewhere:
// docker.go contains similar code, more sophisticated
// (at the very least the deduplication should be reused from there).
func manifestLayers(manifest []byte) ([]string, error) {
    man := manifestSchema1{}
    if err := json.Unmarshal(manifest, &man); err != nil {
        return nil, err
// contextsFromGlobalOptions returns source and destination types.SystemContext depending on c.
func contextsFromGlobalOptions(c *cli.Context) (*types.SystemContext, *types.SystemContext, error) {
    sourceCtx, err := contextFromGlobalOptions(c, "src-")
    if err != nil {
        return nil, nil, err
    }
    layers := []string{}
    for _, layer := range man.FSLayers {
        layers = append(layers, layer.BlobSum)
    destinationCtx, err := contextFromGlobalOptions(c, "dest-")
    if err != nil {
        return nil, nil, err
    }
    return layers, nil
    return sourceCtx, destinationCtx, nil
}

// FIXME: this is a copy from docker_image.go and does not belong here.
type manifestSchema1 struct {
    Name string
    Tag  string
    FSLayers []struct {
        BlobSum string `json:"blobSum"`
    } `json:"fsLayers"`
    History []struct {
        V1Compatibility string `json:"v1Compatibility"`
    } `json:"history"`
    // TODO(runcom) verify the downloaded manifest
    //Signature []byte `json:"signature"`
}

func copyHandler(context *cli.Context) {
func copyHandler(context *cli.Context) error {
    if len(context.Args()) != 2 {
        logrus.Fatal("Usage: copy source destination")
        return errors.New("Usage: copy source destination")
    }

    src, err := parseImageSource(context, context.Args()[0])
    policyContext, err := getPolicyContext(context)
    if err != nil {
        logrus.Fatalf("Error initializing %s: %s", context.Args()[0], err.Error())
        return fmt.Errorf("Error loading trust policy: %v", err)
    }
    defer policyContext.Destroy()

    dest, err := parseImageDestination(context, context.Args()[1])
    srcRef, err := alltransports.ParseImageName(context.Args()[0])
    if err != nil {
        logrus.Fatalf("Error initializing %s: %s", context.Args()[1], err.Error())
        return fmt.Errorf("Invalid source name %s: %v", context.Args()[0], err)
    }
    destRef, err := alltransports.ParseImageName(context.Args()[1])
    if err != nil {
        return fmt.Errorf("Invalid destination name %s: %v", context.Args()[1], err)
    }
    signBy := context.String("sign-by")
    removeSignatures := context.Bool("remove-signatures")

    manifest, _, err := src.GetManifest([]string{utils.DockerV2Schema1MIMEType})
    sourceCtx, destinationCtx, err := contextsFromGlobalOptions(context)
    if err != nil {
        logrus.Fatalf("Error reading manifest: %s", err.Error())
        return err
    }

    layers, err := manifestLayers(manifest)
    if err != nil {
        logrus.Fatalf("Error parsing manifest: %s", err.Error())
    }
    for _, layer := range layers {
        stream, err := src.GetLayer(layer)
        if err != nil {
            logrus.Fatalf("Error reading layer %s: %s", layer, err.Error())
        }
        defer stream.Close()
        if err := dest.PutLayer(layer, stream); err != nil {
            logrus.Fatalf("Error writing layer: %s", err.Error())
        }
    }

    sigs, err := src.GetSignatures()
    if err != nil {
        logrus.Fatalf("Error reading signatures: %s", err.Error())
    }

    if signBy != "" {
        mech, err := signature.NewGPGSigningMechanism()
        if err != nil {
            logrus.Fatalf("Error initializing GPG: %s", err.Error())
        }
        dockerReference, err := dest.CanonicalDockerReference()
        if err != nil {
            logrus.Fatalf("Error determining canonical Docker reference: %s", err.Error())
        }

        newSig, err := signature.SignDockerManifest(manifest, dockerReference, mech, signBy)
        if err != nil {
            logrus.Fatalf("Error creating signature: %s", err.Error())
        }
        sigs = append(sigs, newSig)
    }

    if err := dest.PutSignatures(sigs); err != nil {
        logrus.Fatalf("Error writing signatures: %s", err.Error())
    }

    // FIXME: We need to call PutManifest after PutLayer and PutSignatures. This seems ugly; move to a "set properties" + "commit" model?
    if err := dest.PutManifest(manifest); err != nil {
        logrus.Fatalf("Error writing manifest: %s", err.Error())
    }
    return copy.Image(policyContext, destRef, srcRef, &copy.Options{
        RemoveSignatures: removeSignatures,
        SignBy:           signBy,
        ReportWriter:     os.Stdout,
        SourceCtx:        sourceCtx,
        DestinationCtx:   destinationCtx,
    })
}

var copyCmd = cli.Command{
    Name:   "copy",
    Action: copyHandler,
    Name:  "copy",
    Usage: "Copy an IMAGE-NAME from one location to another",
    Description: fmt.Sprintf(`

    Container "IMAGE-NAME" uses a "transport":"details" format.

    Supported transports:
    %s

    See skopeo(1) section "IMAGE NAMES" for the expected format
    `, strings.Join(transports.ListNames(), ", ")),
    ArgsUsage: "SOURCE-IMAGE DESTINATION-IMAGE",
    Action:    copyHandler,
    // FIXME: Do we need to namespace the GPG aspect?
    Flags: []cli.Flag{
        cli.BoolFlag{
            Name:  "remove-signatures",
            Usage: "Do not copy signatures from SOURCE-IMAGE",
        },
        cli.StringFlag{
            Name:  "sign-by",
            Usage: "sign the image using a GPG key with the specified fingerprint",
            Usage: "Sign the image using a GPG key with the specified `FINGERPRINT`",
        },
        cli.StringFlag{
            Name:  "src-creds, screds",
            Value: "",
            Usage: "Use `USERNAME[:PASSWORD]` for accessing the source registry",
        },
        cli.StringFlag{
            Name:  "dest-creds, dcreds",
            Value: "",
            Usage: "Use `USERNAME[:PASSWORD]` for accessing the destination registry",
        },
        cli.StringFlag{
            Name:  "src-cert-dir",
            Value: "",
            Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the source registry",
        },
        cli.BoolTFlag{
            Name:  "src-tls-verify",
            Usage: "require HTTPS and verify certificates when talking to the container source registry (defaults to true)",
        },
        cli.StringFlag{
            Name:  "dest-cert-dir",
            Value: "",
            Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the destination registry",
        },
        cli.BoolTFlag{
            Name:  "dest-tls-verify",
            Usage: "require HTTPS and verify certificates when talking to the container destination registry (defaults to true)",
        },
        cli.StringFlag{
            Name:  "dest-ostree-tmp-dir",
            Value: "",
            Usage: "`DIRECTORY` to use for OSTree temporary files",
        },
    },
}
62 cmd/skopeo/delete.go (new file)
@@ -0,0 +1,62 @@
package main

import (
    "errors"
    "fmt"
    "strings"

    "github.com/containers/image/transports"
    "github.com/containers/image/transports/alltransports"
    "github.com/urfave/cli"
)

func deleteHandler(context *cli.Context) error {
    if len(context.Args()) != 1 {
        return errors.New("Usage: delete imageReference")
    }

    ref, err := alltransports.ParseImageName(context.Args()[0])
    if err != nil {
        return fmt.Errorf("Invalid source name %s: %v", context.Args()[0], err)
    }

    ctx, err := contextFromGlobalOptions(context, "")
    if err != nil {
        return err
    }
    if err := ref.DeleteImage(ctx); err != nil {
        return err
    }
    return nil
}

var deleteCmd = cli.Command{
    Name:  "delete",
    Usage: "Delete image IMAGE-NAME",
    Description: fmt.Sprintf(`
    Delete an "IMAGE_NAME" from a transport

    Supported transports:
    %s

    See skopeo(1) section "IMAGE NAMES" for the expected format
    `, strings.Join(transports.ListNames(), ", ")),
    ArgsUsage: "IMAGE-NAME",
    Action:    deleteHandler,
    Flags: []cli.Flag{
        cli.StringFlag{
            Name:  "creds",
            Value: "",
            Usage: "Use `USERNAME[:PASSWORD]` for accessing the registry",
        },
        cli.StringFlag{
            Name:  "cert-dir",
            Value: "",
            Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry",
        },
        cli.BoolTFlag{
            Name:  "tls-verify",
            Usage: "require HTTPS and verify certificates when talking to container registries (defaults to true)",
        },
    },
}
cmd/skopeo/inspect.go
@@ -3,18 +3,23 @@ package main
import (
    "encoding/json"
    "fmt"
    "strings"
    "time"

    "github.com/Sirupsen/logrus"
    "github.com/codegangsta/cli"
    "github.com/projectatomic/skopeo/docker/utils"
    "github.com/containers/image/docker"
    "github.com/containers/image/manifest"
    "github.com/containers/image/transports"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
    "github.com/urfave/cli"
)

// inspectOutput is the output format of (skopeo inspect), primarily so that we can format it with a simple json.MarshalIndent.
type inspectOutput struct {
    Name   string
    Tag    string
    Digest string
    Name   string `json:",omitempty"`
    Tag    string `json:",omitempty"`
    Digest digest.Digest
    RepoTags      []string
    Created       time.Time
    DockerVersion string
@@ -26,43 +31,68 @@ type inspectOutput struct {

var inspectCmd = cli.Command{
    Name:  "inspect",
    Usage: "inspect images on a registry",
    Usage: "Inspect image IMAGE-NAME",
    Description: fmt.Sprintf(`
    Return low-level information about "IMAGE-NAME" in a registry/transport

    Supported transports:
    %s

    See skopeo(1) section "IMAGE NAMES" for the expected format
    `, strings.Join(transports.ListNames(), ", ")),
    ArgsUsage: "IMAGE-NAME",
    Flags: []cli.Flag{
        cli.StringFlag{
            Name:  "cert-dir",
            Value: "",
            Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry",
        },
        cli.BoolTFlag{
            Name:  "tls-verify",
            Usage: "require HTTPS and verify certificates when talking to container registries (defaults to true)",
        },
        cli.BoolFlag{
            Name:  "raw",
            Usage: "output raw manifest",
        },
        cli.StringFlag{
            Name:  "creds",
            Value: "",
            Usage: "Use `USERNAME[:PASSWORD]` for accessing the registry",
        },
    },
    Action: func(c *cli.Context) {
    Action: func(c *cli.Context) (retErr error) {
        img, err := parseImage(c)
        if err != nil {
            logrus.Fatal(err)
            return err
        }
        rawManifest, err := img.Manifest()

        defer func() {
            if err := img.Close(); err != nil {
                retErr = errors.Wrapf(retErr, fmt.Sprintf("(could not close image: %v) ", err))
            }
        }()

        rawManifest, _, err := img.Manifest()
        if err != nil {
            logrus.Fatal(err)
            return err
        }
        if c.Bool("raw") {
            fmt.Println(string(rawManifest))
            return
            _, err := c.App.Writer.Write(rawManifest)
            if err != nil {
                return fmt.Errorf("Error writing manifest to standard output: %v", err)
            }
            return nil
        }
        imgInspect, err := img.Inspect()
        if err != nil {
            logrus.Fatal(err)
        }
        manifestDigest, err := utils.ManifestDigest(rawManifest)
        if err != nil {
            logrus.Fatalf("Error computing manifest digest: %s", err.Error())
        }
        repoTags, err := img.GetRepositoryTags()
        if err != nil {
            logrus.Fatalf("Error determining repository tags: %s", err.Error())
            return err
        }
        outputData := inspectOutput{
            Name:     imgInspect.Name,
            Tag:      imgInspect.Tag,
            Digest:   manifestDigest,
            RepoTags: repoTags,
            Name: "", // Possibly overridden for a docker.Image.
            Tag:  imgInspect.Tag,
            // Digest is set below.
            RepoTags:      []string{}, // Possibly overridden for a docker.Image.
            Created:       imgInspect.Created,
            DockerVersion: imgInspect.DockerVersion,
            Labels:        imgInspect.Labels,
@@ -70,10 +100,28 @@ var inspectCmd = cli.Command{
            Os:     imgInspect.Os,
            Layers: imgInspect.Layers,
        }
        outputData.Digest, err = manifest.Digest(rawManifest)
        if err != nil {
            return fmt.Errorf("Error computing manifest digest: %v", err)
        }
        if dockerImg, ok := img.(*docker.Image); ok {
            outputData.Name = dockerImg.SourceRefFullName()
            outputData.RepoTags, err = dockerImg.GetRepositoryTags()
            if err != nil {
                // some registries may decide to block the "list all tags" endpoint
                // gracefully allow the inspect to continue in this case. Currently
                // the IBM Bluemix container registry has this restriction.
                if !strings.Contains(err.Error(), "401") {
                    return fmt.Errorf("Error determining repository tags: %v", err)
                }
                logrus.Warnf("Registry disallows tag list retrieval; skipping")
            }
        }
        out, err := json.MarshalIndent(outputData, "", "    ")
        if err != nil {
            logrus.Fatal(err)
            return err
        }
        fmt.Println(string(out))
        fmt.Fprintln(c.App.Writer, string(out))
        return nil
    },
}
cmd/skopeo/layers.go
@@ -1,21 +1,124 @@
package main

import (
    "github.com/Sirupsen/logrus"
    "github.com/codegangsta/cli"
    "fmt"
    "io/ioutil"
    "os"
    "strings"

    "github.com/containers/image/directory"
    "github.com/containers/image/image"
    "github.com/containers/image/manifest"
    "github.com/containers/image/types"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
    "github.com/urfave/cli"
)

// TODO(runcom): document args and usage
var layersCmd = cli.Command{
    Name:  "layers",
    Usage: "get images layers",
    Action: func(c *cli.Context) {
        img, err := parseImage(c)
    Name:      "layers",
    Usage:     "Get layers of IMAGE-NAME",
    ArgsUsage: "IMAGE-NAME [LAYER...]",
    Hidden:    true,
    Action: func(c *cli.Context) (retErr error) {
        fmt.Fprintln(os.Stderr, `DEPRECATED: skopeo layers is deprecated in favor of skopeo copy`)
        if c.NArg() == 0 {
            return errors.New("Usage: layers imageReference [layer...]")
        }
        rawSource, err := parseImageSource(c, c.Args()[0], []string{
            // TODO: skopeo layers only supports these now
            // eventually we'll remove this command altogether...
            manifest.DockerV2Schema1SignedMediaType,
            manifest.DockerV2Schema1MediaType,
        })
        if err != nil {
            logrus.Fatal(err)
            return err
        }
        if err := img.Layers(c.Args().Tail()...); err != nil {
            logrus.Fatal(err)
        src, err := image.FromSource(rawSource)
        if err != nil {
            if closeErr := rawSource.Close(); closeErr != nil {
                return errors.Wrapf(err, " (close error: %v)", closeErr)
            }

            return err
        }
        defer func() {
            if err := src.Close(); err != nil {
                retErr = errors.Wrapf(retErr, " (close error: %v)", err)
            }
        }()

        var blobDigests []digest.Digest
        for _, dString := range c.Args().Tail() {
            if !strings.HasPrefix(dString, "sha256:") {
                dString = "sha256:" + dString
            }
            d, err := digest.Parse(dString)
            if err != nil {
                return err
            }
            blobDigests = append(blobDigests, d)
        }

        if len(blobDigests) == 0 {
            layers := src.LayerInfos()
            seenLayers := map[digest.Digest]struct{}{}
            for _, info := range layers {
                if _, ok := seenLayers[info.Digest]; !ok {
                    blobDigests = append(blobDigests, info.Digest)
                    seenLayers[info.Digest] = struct{}{}
                }
            }
            configInfo := src.ConfigInfo()
            if configInfo.Digest != "" {
                blobDigests = append(blobDigests, configInfo.Digest)
            }
        }

        tmpDir, err := ioutil.TempDir(".", "layers-")
        if err != nil {
            return err
        }
        tmpDirRef, err := directory.NewReference(tmpDir)
        if err != nil {
            return err
        }
        dest, err := tmpDirRef.NewImageDestination(nil)
        if err != nil {
            return err
        }

        defer func() {
            if err := dest.Close(); err != nil {
                retErr = errors.Wrapf(retErr, " (close error: %v)", err)
            }
        }()

        for _, digest := range blobDigests {
            r, blobSize, err := rawSource.GetBlob(types.BlobInfo{Digest: digest, Size: -1})
            if err != nil {
                return err
            }
            if _, err := dest.PutBlob(r, types.BlobInfo{Digest: digest, Size: blobSize}); err != nil {
                if closeErr := r.Close(); closeErr != nil {
                    return errors.Wrapf(err, " (close error: %v)", closeErr)
                }
                return err
            }
        }

        manifest, _, err := src.Manifest()
        if err != nil {
            return err
        }
        if err := dest.PutManifest(manifest); err != nil {
            return err
        }

        if err := dest.Commit(); err != nil {
            return err
        }

        return nil
    },
}
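The digest arguments above are normalized before use: a bare hex layer ID gets a `sha256:` prefix, then `digest.Parse` validates it. A minimal standalone sketch of those two steps, using the go-digest package this file imports (the sample hex value is borrowed from the fedora layer list earlier in the README):

```go
package main

import (
    "fmt"
    "strings"

    "github.com/opencontainers/go-digest"
)

func main() {
    // A bare hex layer ID, as a user might pass it to `skopeo layers`.
    dString := "a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
    if !strings.HasPrefix(dString, "sha256:") {
        dString = "sha256:" + dString // same normalization as the command above
    }
    d, err := digest.Parse(dString) // validates the algorithm and hex length
    if err != nil {
        fmt.Println("invalid digest:", err)
        return
    }
    fmt.Println("parsed:", d)
}
```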
cmd/skopeo/main.go
@@ -5,68 +5,98 @@ import (
    "os"

    "github.com/Sirupsen/logrus"
    "github.com/codegangsta/cli"
    "github.com/containers/image/signature"
    "github.com/containers/storage/pkg/reexec"
    "github.com/projectatomic/skopeo/version"
    "github.com/urfave/cli"
)

// gitCommit will be the hash that the binary was built from
// and will be populated by the Makefile
var gitCommit = ""

const (
    usage = `interact with registries`
)

func main() {
// createApp returns a cli.App to be run or tested.
func createApp() *cli.App {
    app := cli.NewApp()
    app.EnableBashCompletion = true
    app.Name = "skopeo"
    if gitCommit != "" {
        app.Version = fmt.Sprintf("%s commit: %s", version.Version, gitCommit)
    } else {
        app.Version = version.Version
    }
    app.Usage = usage
    // TODO(runcom)
    //app.EnableBashCompletion = true
    app.Usage = "Various operations with container images and container image registries"
    app.Flags = []cli.Flag{
        cli.BoolFlag{
            Name:  "debug",
            Usage: "enable debug output",
        },
        cli.StringFlag{
            Name:  "username",
            Value: "",
            Usage: "registry username",
        cli.BoolTFlag{
            Name:   "tls-verify",
            Usage:  "require HTTPS and verify certificates when talking to container registries (defaults to true)",
            Hidden: true,
        },
        cli.StringFlag{
            Name:  "password",
            Name:  "policy",
            Value: "",
            Usage: "registry password",
        },
        cli.StringFlag{
            Name:  "cert-path",
            Value: "",
            Usage: "Certificates path to connect to the given registry (cert.pem, key.pem)",
            Usage: "Path to a trust policy file",
        },
        cli.BoolFlag{
            Name:  "tls-verify",
            Usage: "Whether to verify certificates or not",
            Name:  "insecure-policy",
            Usage: "run the tool without any policy check",
        },
        cli.StringFlag{
            Name:  "registries.d",
            Value: "",
            Usage: "use registry configuration files in `DIR` (e.g. for container signature storage)",
        },
    }
    app.Before = func(c *cli.Context) error {
        if c.GlobalBool("debug") {
            logrus.SetLevel(logrus.DebugLevel)
        }
        if c.GlobalIsSet("tls-verify") {
            logrus.Warn("'--tls-verify' is deprecated, please set this on the specific subcommand")
        }
        return nil
    }
    app.Commands = []cli.Command{
        copyCmd,
        inspectCmd,
        layersCmd,
        deleteCmd,
        manifestDigestCmd,
        standaloneSignCmd,
        standaloneVerifyCmd,
        untrustedSignatureDumpCmd,
    }
    return app
}

func main() {
    if reexec.Init() {
        return
    }
    app := createApp()
    if err := app.Run(os.Args); err != nil {
        logrus.Fatal(err)
    }
}

// getPolicyContext handles the global "policy" flag.
func getPolicyContext(c *cli.Context) (*signature.PolicyContext, error) {
    policyPath := c.GlobalString("policy")
    var policy *signature.Policy // This could be cached across calls, if we had an application context.
    var err error
    if c.GlobalBool("insecure-policy") {
        policy = &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
    } else if policyPath == "" {
        policy, err = signature.DefaultPolicy(nil)
    } else {
        policy, err = signature.NewPolicyFromFile(policyPath)
    }
    if err != nil {
        return nil, err
    }
    return signature.NewPolicyContext(policy)
}
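`getPolicyContext` above chooses between an insecure accept-anything policy, the system default, and an explicit policy file. A minimal sketch of the same containers/image calls, assuming the insecure variant (the `--insecure-policy` path, with no signature enforcement); callers such as `copyHandler` release the context with `Destroy()`:

```go
package main

import (
    "fmt"

    "github.com/containers/image/signature"
)

func main() {
    // The "accept anything" policy used when --insecure-policy is given.
    policy := &signature.Policy{
        Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()},
    }
    pc, err := signature.NewPolicyContext(policy)
    if err != nil {
        fmt.Println("error creating policy context:", err)
        return
    }
    defer pc.Destroy() // release it when done, as copyHandler does
    fmt.Println("policy context ready; pass it to copy.Image")
}
```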
14 cmd/skopeo/main_test.go (new file)
@@ -0,0 +1,14 @@
package main

import "bytes"

// runSkopeo creates an app object and runs it with args, with an implied first "skopeo".
// Returns output intended for stdout and the returned error, if any.
func runSkopeo(args ...string) (string, error) {
    app := createApp()
    stdout := bytes.Buffer{}
    app.Writer = &stdout
    args = append([]string{"skopeo"}, args...)
    err := app.Run(args)
    return stdout.String(), err
}
35 cmd/skopeo/manifest.go (new file)
@@ -0,0 +1,35 @@
package main

import (
    "errors"
    "fmt"
    "io/ioutil"

    "github.com/containers/image/manifest"
    "github.com/urfave/cli"
)

func manifestDigest(context *cli.Context) error {
    if len(context.Args()) != 1 {
        return errors.New("Usage: skopeo manifest-digest manifest")
    }
    manifestPath := context.Args()[0]

    man, err := ioutil.ReadFile(manifestPath)
    if err != nil {
        return fmt.Errorf("Error reading manifest from %s: %v", manifestPath, err)
    }
    digest, err := manifest.Digest(man)
    if err != nil {
        return fmt.Errorf("Error computing digest: %v", err)
    }
    fmt.Fprintf(context.App.Writer, "%s\n", digest)
    return nil
}

var manifestDigestCmd = cli.Command{
    Name:      "manifest-digest",
    Usage:     "Compute a manifest digest of a file",
    ArgsUsage: "MANIFEST",
    Action:    manifestDigest,
}
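The `manifest-digest` command above is a thin wrapper around `manifest.Digest` from containers/image. A minimal sketch of that library call on in-memory bytes (the tiny JSON payload is made up purely for illustration; real manifests are larger):

```go
package main

import (
    "fmt"

    "github.com/containers/image/manifest"
)

func main() {
    raw := []byte(`{"schemaVersion": 2}`) // placeholder manifest bytes
    d, err := manifest.Digest(raw)
    if err != nil {
        fmt.Println("error computing digest:", err)
        return
    }
    fmt.Println(d) // a sha256:... digest of the raw bytes
}
```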
31 cmd/skopeo/manifest_test.go (new file)
@@ -0,0 +1,31 @@
package main

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestManifestDigest(t *testing.T) {
    // Invalid command-line arguments
    for _, args := range [][]string{
        {},
        {"a1", "a2"},
    } {
        out, err := runSkopeo(append([]string{"manifest-digest"}, args...)...)
        assertTestFailed(t, out, err, "Usage")
    }

    // Error reading manifest
    out, err := runSkopeo("manifest-digest", "/this/doesnt/exist")
    assertTestFailed(t, out, err, "/this/doesnt/exist")

    // Error computing digest
    out, err = runSkopeo("manifest-digest", "fixtures/v2s1-invalid-signatures.manifest.json")
    assertTestFailed(t, out, err, "computing digest")

    // Success
    out, err = runSkopeo("manifest-digest", "fixtures/image.manifest.json")
    assert.NoError(t, err)
    assert.Equal(t, fixturesTestImageManifestDigest.String()+"\n", out)
}
cmd/skopeo/signing.go
@@ -1,18 +1,19 @@
package main

import (
    "encoding/json"
    "errors"
    "fmt"
    "io/ioutil"

    "github.com/Sirupsen/logrus"
    "github.com/codegangsta/cli"
    "github.com/projectatomic/skopeo/signature"
    "github.com/containers/image/signature"
    "github.com/urfave/cli"
)

func standaloneSign(context *cli.Context) {
func standaloneSign(context *cli.Context) error {
    outputFile := context.String("output")
    if len(context.Args()) != 3 || outputFile == "" {
        logrus.Fatal("Usage: skopeo standalone-sign manifest docker-reference key-fingerprint -o signature")
        return errors.New("Usage: skopeo standalone-sign manifest docker-reference key-fingerprint -o signature")
    }
    manifestPath := context.Args()[0]
    dockerReference := context.Args()[1]
@@ -20,38 +21,41 @@ func standaloneSign(context *cli.Context) {

    manifest, err := ioutil.ReadFile(manifestPath)
    if err != nil {
        logrus.Fatalf("Error reading %s: %s", manifestPath, err.Error())
        return fmt.Errorf("Error reading %s: %v", manifestPath, err)
    }

    mech, err := signature.NewGPGSigningMechanism()
    if err != nil {
        logrus.Fatalf("Error initializing GPG: %s", err.Error())
        return fmt.Errorf("Error initializing GPG: %v", err)
    }
    defer mech.Close()
    signature, err := signature.SignDockerManifest(manifest, dockerReference, mech, fingerprint)
    if err != nil {
        logrus.Fatalf("Error creating signature: %s", err.Error())
        return fmt.Errorf("Error creating signature: %v", err)
    }

    if err := ioutil.WriteFile(outputFile, signature, 0644); err != nil {
        logrus.Fatalf("Error writing signature to %s: %s", outputFile, err.Error())
        return fmt.Errorf("Error writing signature to %s: %v", outputFile, err)
    }
    return nil
}

var standaloneSignCmd = cli.Command{
    Name:   "standalone-sign",
    Usage:  "Create a signature using local files",
    Action: standaloneSign,
    Name:      "standalone-sign",
    Usage:     "Create a signature using local files",
    ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT",
    Action:    standaloneSign,
    Flags: []cli.Flag{
        cli.StringFlag{
            Name:  "output, o",
            Usage: "output signature file name",
            Usage: "output the signature to `SIGNATURE`",
        },
    },
}

func standaloneVerify(context *cli.Context) {
func standaloneVerify(context *cli.Context) error {
    if len(context.Args()) != 4 {
        logrus.Fatal("Usage: skopeo standalone-verify manifest docker-reference key-fingerprint signature")
        return errors.New("Usage: skopeo standalone-verify manifest docker-reference key-fingerprint signature")
    }
    manifestPath := context.Args()[0]
    expectedDockerReference := context.Args()[1]
@@ -60,27 +64,67 @@ func standaloneVerify(context *cli.Context) {

    unverifiedManifest, err := ioutil.ReadFile(manifestPath)
    if err != nil {
        logrus.Fatalf("Error reading manifest from %s: %s", signaturePath, err.Error())
        return fmt.Errorf("Error reading manifest from %s: %v", manifestPath, err)
    }
    unverifiedSignature, err := ioutil.ReadFile(signaturePath)
    if err != nil {
        logrus.Fatalf("Error reading signature from %s: %s", signaturePath, err.Error())
        return fmt.Errorf("Error reading signature from %s: %v", signaturePath, err)
    }

    mech, err := signature.NewGPGSigningMechanism()
    if err != nil {
        logrus.Fatalf("Error initializing GPG: %s", err.Error())
        return fmt.Errorf("Error initializing GPG: %v", err)
    }
    defer mech.Close()
    sig, err := signature.VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest, expectedDockerReference, mech, expectedFingerprint)
    if err != nil {
        logrus.Fatalf("Error verifying signature: %s", err.Error())
        return fmt.Errorf("Error verifying signature: %v", err)
    }

    fmt.Printf("Signature verified, digest %s\n", sig.DockerManifestDigest)
    fmt.Fprintf(context.App.Writer, "Signature verified, digest %s\n", sig.DockerManifestDigest)
    return nil
}

var standaloneVerifyCmd = cli.Command{
    Name:   "standalone-verify",
    Usage:  "Verify a signature using local files",
    Action: standaloneVerify,
    Name:      "standalone-verify",
    Usage:     "Verify a signature using local files",
    ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT SIGNATURE",
    Action:    standaloneVerify,
}

func untrustedSignatureDump(context *cli.Context) error {
    if len(context.Args()) != 1 {
        return errors.New("Usage: skopeo untrusted-signature-dump-without-verification signature")
    }
    untrustedSignaturePath := context.Args()[0]

    untrustedSignature, err := ioutil.ReadFile(untrustedSignaturePath)
    if err != nil {
        return fmt.Errorf("Error reading untrusted signature from %s: %v", untrustedSignaturePath, err)
    }

    untrustedInfo, err := signature.GetUntrustedSignatureInformationWithoutVerifying(untrustedSignature)
    if err != nil {
        return fmt.Errorf("Error decoding untrusted signature: %v", err)
    }
    untrustedOut, err := json.MarshalIndent(untrustedInfo, "", "    ")
    if err != nil {
        return err
    }
    fmt.Fprintln(context.App.Writer, string(untrustedOut))
    return nil
}

// WARNING: Do not use the contents of this for ANY security decisions,
// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
// There is NO REASON to expect the values to be correct, or not intentionally misleading
// (including things like “✅ Verified by $authority”)
//
// The subcommand is undocumented, and it may be renamed or entirely disappear in the future.
var untrustedSignatureDumpCmd = cli.Command{
    Name:      "untrusted-signature-dump-without-verification",
    Usage:     "Dump contents of a signature WITHOUT VERIFYING IT",
    ArgsUsage: "SIGNATURE",
    Hidden:    true,
    Action:    untrustedSignatureDump,
}
178 cmd/skopeo/signing_test.go (new file)
@@ -0,0 +1,178 @@
package main

import (
    "encoding/json"
    "io/ioutil"
    "os"
    "testing"
    "time"

    "github.com/containers/image/signature"
    "github.com/opencontainers/go-digest"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

const (
    // fixturesTestImageManifestDigest is the Docker manifest digest of "image.manifest.json"
    fixturesTestImageManifestDigest = digest.Digest("sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55")
    // fixturesTestKeyFingerprint is the fingerprint of the private key.
    fixturesTestKeyFingerprint = "1D8230F6CDB6A06716E414C1DB72F2188BB46CC8"
    // fixturesTestKeyShortID is the key ID of the private key.
    fixturesTestKeyShortID = "DB72F2188BB46CC8"
)

// assertTestFailed asserts that a runSkopeo call failed, with nothing on stdout
// and with substring contained in the error message.
func assertTestFailed(t *testing.T, stdout string, err error, substring string) {
    assert.Error(t, err)
    assert.Empty(t, stdout)
    assert.Contains(t, err.Error(), substring)
}

func TestStandaloneSign(t *testing.T) {
    mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
    require.NoError(t, err)
    defer mech.Close()
    if err := mech.SupportsSigning(); err != nil {
        t.Skipf("Signing not supported: %v", err)
    }

    manifestPath := "fixtures/image.manifest.json"
    dockerReference := "testing/manifest"
    os.Setenv("GNUPGHOME", "fixtures")
    defer os.Unsetenv("GNUPGHOME")

    // Invalid command-line arguments
    for _, args := range [][]string{
        {},
        {"a1", "a2"},
        {"a1", "a2", "a3"},
        {"a1", "a2", "a3", "a4"},
        {"-o", "o", "a1", "a2"},
        {"-o", "o", "a1", "a2", "a3", "a4"},
    } {
        out, err := runSkopeo(append([]string{"standalone-sign"}, args...)...)
        assertTestFailed(t, out, err, "Usage")
    }

    // Error reading manifest
    out, err := runSkopeo("standalone-sign", "-o", "/dev/null",
        "/this/doesnt/exist", dockerReference, fixturesTestKeyFingerprint)
    assertTestFailed(t, out, err, "/this/doesnt/exist")

    // Invalid Docker reference
    out, err = runSkopeo("standalone-sign", "-o", "/dev/null",
        manifestPath, "" /* empty reference */, fixturesTestKeyFingerprint)
    assertTestFailed(t, out, err, "empty signature content")

    // Unknown key.
    out, err = runSkopeo("standalone-sign", "-o", "/dev/null",
        manifestPath, dockerReference, "UNKNOWN GPG FINGERPRINT")
    assert.Error(t, err)
    assert.Empty(t, out)

    // Error writing output
    out, err = runSkopeo("standalone-sign", "-o", "/dev/full",
        manifestPath, dockerReference, fixturesTestKeyFingerprint)
    assertTestFailed(t, out, err, "/dev/full")

    // Success
    sigOutput, err := ioutil.TempFile("", "sig")
    require.NoError(t, err)
    defer os.Remove(sigOutput.Name())
    out, err = runSkopeo("standalone-sign", "-o", sigOutput.Name(),
        manifestPath, dockerReference, fixturesTestKeyFingerprint)
    require.NoError(t, err)
    assert.Empty(t, out)

    sig, err := ioutil.ReadFile(sigOutput.Name())
    require.NoError(t, err)
    manifest, err := ioutil.ReadFile(manifestPath)
    require.NoError(t, err)
    mech, err = signature.NewGPGSigningMechanism()
    require.NoError(t, err)
    defer mech.Close()
    verified, err := signature.VerifyDockerManifestSignature(sig, manifest, dockerReference, mech, fixturesTestKeyFingerprint)
    require.NoError(t, err)
    assert.Equal(t, dockerReference, verified.DockerReference)
    assert.Equal(t, fixturesTestImageManifestDigest, verified.DockerManifestDigest)
}

func TestStandaloneVerify(t *testing.T) {
    manifestPath := "fixtures/image.manifest.json"
    signaturePath := "fixtures/image.signature"
    dockerReference := "testing/manifest"
    os.Setenv("GNUPGHOME", "fixtures")
    defer os.Unsetenv("GNUPGHOME")

    // Invalid command-line arguments
    for _, args := range [][]string{
        {},
        {"a1", "a2", "a3"},
        {"a1", "a2", "a3", "a4", "a5"},
    } {
        out, err := runSkopeo(append([]string{"standalone-verify"}, args...)...)
        assertTestFailed(t, out, err, "Usage")
    }

    // Error reading manifest
    out, err := runSkopeo("standalone-verify", "/this/doesnt/exist",
        dockerReference, fixturesTestKeyFingerprint, signaturePath)
    assertTestFailed(t, out, err, "/this/doesnt/exist")

    // Error reading signature
    out, err = runSkopeo("standalone-verify", manifestPath,
        dockerReference, fixturesTestKeyFingerprint, "/this/doesnt/exist")
    assertTestFailed(t, out, err, "/this/doesnt/exist")

    // Error verifying signature
    out, err = runSkopeo("standalone-verify", manifestPath,
        dockerReference, fixturesTestKeyFingerprint, "fixtures/corrupt.signature")
    assertTestFailed(t, out, err, "Error verifying signature")

    // Success
    out, err = runSkopeo("standalone-verify", manifestPath,
        dockerReference, fixturesTestKeyFingerprint, signaturePath)
    assert.NoError(t, err)
    assert.Equal(t, "Signature verified, digest "+fixturesTestImageManifestDigest.String()+"\n", out)
}

func TestUntrustedSignatureDump(t *testing.T) {
    // Invalid command-line arguments
    for _, args := range [][]string{
        {},
        {"a1", "a2"},
        {"a1", "a2", "a3", "a4"},
    } {
        out, err := runSkopeo(append([]string{"untrusted-signature-dump-without-verification"}, args...)...)
        assertTestFailed(t, out, err, "Usage")
    }

    // Error reading the signature file
    out, err := runSkopeo("untrusted-signature-dump-without-verification",
        "/this/doesnt/exist")
    assertTestFailed(t, out, err, "/this/doesnt/exist")

    // Error reading signature (input is not a signature)
    out, err = runSkopeo("untrusted-signature-dump-without-verification", "fixtures/image.manifest.json")
    assertTestFailed(t, out, err, "Error decoding untrusted signature")

    // Success
    for _, path := range []string{"fixtures/image.signature", "fixtures/corrupt.signature"} {
        out, err = runSkopeo("untrusted-signature-dump-without-verification", path)
        require.NoError(t, err)

        var info signature.UntrustedSignatureInformation
        err := json.Unmarshal([]byte(out), &info)
        require.NoError(t, err)
        assert.Equal(t, fixturesTestImageManifestDigest, info.UntrustedDockerManifestDigest)
        assert.Equal(t, "testing/manifest", info.UntrustedDockerReference)
        assert.NotNil(t, info.UntrustedCreatorID)
        assert.Equal(t, "atomic ", *info.UntrustedCreatorID)
        assert.NotNil(t, info.UntrustedTimestamp)
        assert.True(t, time.Unix(1458239713, 0).Equal(*info.UntrustedTimestamp))
        assert.Equal(t, fixturesTestKeyShortID, info.UntrustedShortKeyIdentifier)
    }
}
cmd/skopeo/utils.go
@@ -1,71 +1,87 @@
package main

import (
    "fmt"
    "errors"
    "strings"

    "github.com/codegangsta/cli"
    "github.com/projectatomic/skopeo/directory"
    "github.com/projectatomic/skopeo/docker"
    "github.com/projectatomic/skopeo/openshift"
    "github.com/projectatomic/skopeo/types"
    "github.com/containers/image/transports/alltransports"
    "github.com/containers/image/types"
    "github.com/urfave/cli"
)

const (
    // atomicPrefix is the URL-like schema prefix used for Atomic registry image references.
    atomicPrefix = "atomic:"
    // dockerPrefix is the URL-like schema prefix used for Docker image references.
    dockerPrefix = "docker://"
    // directoryPrefix is the URL-like schema prefix used for local directories (for debugging)
    directoryPrefix = "dir:"
)

// ParseImage converts image URL-like string to an initialized handler for that image.
func parseImage(c *cli.Context) (types.Image, error) {
    var (
        imgName   = c.Args().First()
        certPath  = c.GlobalString("cert-path")
        tlsVerify = c.GlobalBool("tls-verify")
    )
    switch {
    case strings.HasPrefix(imgName, dockerPrefix):
        return docker.NewDockerImage(strings.TrimPrefix(imgName, dockerPrefix), certPath, tlsVerify)
    //case strings.HasPrefix(img, appcPrefix):
    //
func contextFromGlobalOptions(c *cli.Context, flagPrefix string) (*types.SystemContext, error) {
    ctx := &types.SystemContext{
        RegistriesDirPath: c.GlobalString("registries.d"),
        DockerCertPath:    c.String(flagPrefix + "cert-dir"),
        // DEPRECATED: keep this here for backward compatibility, but override
        // them if per subcommand flags are provided (see below).
        DockerInsecureSkipTLSVerify: !c.GlobalBoolT("tls-verify"),
        OSTreeTmpDirPath:            c.String(flagPrefix + "ostree-tmp-dir"),
    }
    return nil, fmt.Errorf("no valid prefix provided")
    if c.IsSet(flagPrefix + "tls-verify") {
        ctx.DockerInsecureSkipTLSVerify = !c.BoolT(flagPrefix + "tls-verify")
    }
    if c.IsSet(flagPrefix + "creds") {
        var err error
        ctx.DockerAuthConfig, err = getDockerAuth(c.String(flagPrefix + "creds"))
        if err != nil {
            return nil, err
        }
    }
    return ctx, nil
}
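`contextFromGlobalOptions` builds flag names from a prefix, so `copy` can consult `--src-creds`/`--dest-creds` while `inspect` and `delete` reuse the same code with an empty prefix, and a per-subcommand `tls-verify` overrides the deprecated global flag. A hypothetical standalone sketch of that lookup pattern (a plain map stands in for the real `cli.Context`; `lookup` is illustrative, not skopeo API):

```go
package main

import "fmt"

// lookup mimics c.IsSet/c.String on prefixed flag names: the per-subcommand
// flag is prefix+name, e.g. "src-" + "creds" for the copy source registry.
func lookup(flags map[string]string, prefix, name string) (string, bool) {
    v, ok := flags[prefix+name]
    return v, ok
}

func main() {
    flags := map[string]string{"src-creds": "testuser:testpassword"}
    if v, ok := lookup(flags, "src-", "creds"); ok {
        fmt.Println("source credentials:", v)
    }
    if _, ok := lookup(flags, "dest-", "creds"); !ok {
        fmt.Println("no destination credentials; fall back to the defaults")
    }
}
```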
func parseCreds(creds string) (string, string, error) {
    if creds == "" {
        return "", "", errors.New("credentials can't be empty")
    }
    up := strings.SplitN(creds, ":", 2)
    if len(up) == 1 {
        return up[0], "", nil
    }
    if up[0] == "" {
        return "", "", errors.New("username can't be empty")
    }
    return up[0], up[1], nil
}
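`parseCreds` splits `USERNAME[:PASSWORD]` on the first colon only, so passwords containing colons survive intact. A standalone sketch of the same splitting rule (restated here because `parseCreds` lives in package main and cannot be imported):

```go
package main

import (
    "fmt"
    "strings"
)

// splitCreds restates the rule parseCreds implements above: SplitN with n=2
// keeps any further colons inside the password part.
func splitCreds(creds string) (user, pass string) {
    up := strings.SplitN(creds, ":", 2)
    if len(up) == 1 {
        return up[0], "" // password omitted entirely
    }
    return up[0], up[1]
}

func main() {
    u, p := splitCreds("testuser:testpassword")
    fmt.Println(u, p) // testuser testpassword
    u, p = splitCreds("testuser:pa:ss")
    fmt.Println(u, p) // testuser pa:ss - only the first colon splits
}
```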
func getDockerAuth(creds string) (*types.DockerAuthConfig, error) {
    username, password, err := parseCreds(creds)
    if err != nil {
        return nil, err
    }
    return &types.DockerAuthConfig{
        Username: username,
        Password: password,
    }, nil
}

// parseImage converts image URL-like string to an initialized handler for that image.
// The caller must call .Close() on the returned Image.
func parseImage(c *cli.Context) (types.Image, error) {
    imgName := c.Args().First()
    ref, err := alltransports.ParseImageName(imgName)
    if err != nil {
        return nil, err
    }
    ctx, err := contextFromGlobalOptions(c, "")
    if err != nil {
        return nil, err
    }
    return ref.NewImage(ctx)
}

// parseImageSource converts image URL-like string to an ImageSource.
func parseImageSource(c *cli.Context, name string) (types.ImageSource, error) {
    var (
        certPath  = c.GlobalString("cert-path")
        tlsVerify = c.GlobalBool("tls-verify") // FIXME!! defaults to false?
    )
    switch {
    case strings.HasPrefix(name, dockerPrefix):
        return docker.NewDockerImageSource(strings.TrimPrefix(name, dockerPrefix), certPath, tlsVerify)
    case strings.HasPrefix(name, atomicPrefix):
        return openshift.NewOpenshiftImageSource(strings.TrimPrefix(name, atomicPrefix), certPath, tlsVerify)
    case strings.HasPrefix(name, directoryPrefix):
        return directory.NewDirImageSource(strings.TrimPrefix(name, directoryPrefix)), nil
// requestedManifestMIMETypes is as in types.ImageReference.NewImageSource.
// The caller must call .Close() on the returned ImageSource.
func parseImageSource(c *cli.Context, name string, requestedManifestMIMETypes []string) (types.ImageSource, error) {
    ref, err := alltransports.ParseImageName(name)
    if err != nil {
        return nil, err
    }
    return nil, fmt.Errorf("Unrecognized image reference %s", name)
}

// parseImageDestination converts image URL-like string to an ImageDestination.
func parseImageDestination(c *cli.Context, name string) (types.ImageDestination, error) {
    var (
        certPath  = c.GlobalString("cert-path")
        tlsVerify = c.GlobalBool("tls-verify") // FIXME!! defaults to false?
    )
    switch {
    case strings.HasPrefix(name, dockerPrefix):
        return docker.NewDockerImageDestination(strings.TrimPrefix(name, dockerPrefix), certPath, tlsVerify)
    case strings.HasPrefix(name, atomicPrefix):
        return openshift.NewOpenshiftImageDestination(strings.TrimPrefix(name, atomicPrefix), certPath, tlsVerify)
    case strings.HasPrefix(name, directoryPrefix):
        return directory.NewDirImageDestination(strings.TrimPrefix(name, directoryPrefix)), nil
    ctx, err := contextFromGlobalOptions(c, "")
    if err != nil {
        return nil, err
    }
    return nil, fmt.Errorf("Unrecognized image reference %s", name)
    return ref.NewImageSource(ctx, requestedManifestMIMETypes)
}

159 completions/bash/skopeo Normal file
@@ -0,0 +1,159 @@
#! /bin/bash

: ${PROG:=$(basename ${BASH_SOURCE})}

_complete_() {
    local options_with_args=$1
    local boolean_options="$2 -h --help"

    case "$prev" in
    $options_with_args)
        return
        ;;
    esac

    case "$cur" in
    -*)
        COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
        ;;
    esac
}

_skopeo_copy() {
    local options_with_args="
    --sign-by
    --src-creds --screds
    --src-cert-dir
    --src-tls-verify
    --dest-creds --dcreds
    --dest-cert-dir
    --dest-ostree-tmp-dir
    --dest-tls-verify
    "
    local boolean_options="
    --remove-signatures
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_inspect() {
    local options_with_args="
    --creds
    --cert-dir
    "
    local boolean_options="
    --raw
    --tls-verify
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_standalone_sign() {
    local options_with_args="
    -o --output
    "
    local boolean_options="
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_standalone_verify() {
    local options_with_args="
    "
    local boolean_options="
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_manifest_digest() {
    local options_with_args="
    "
    local boolean_options="
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_delete() {
    local options_with_args="
    --creds
    --cert-dir
    "
    local boolean_options="
    --tls-verify
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_layers() {
    local options_with_args="
    --creds
    --cert-dir
    "
    local boolean_options="
    --tls-verify
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_skopeo() {
    local options_with_args="
    --policy
    --registries.d
    "
    local boolean_options="
    --insecure-policy
    --debug
    --version -v
    --help -h
    "
    commands=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion )

    case "$prev" in
    $main_options_with_args_glob )
        return
        ;;
    esac

    case "$cur" in
    -*)
        COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
        ;;
    *)
        COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) )
        ;;
    esac
}

_cli_bash_autocomplete() {
    local cur prev words cword

    COMPREPLY=()
    _get_comp_words_by_ref -n : cur prev words cword

    local command=${PROG} cpos=0
    local counter=1
    while [ $counter -lt $cword ]; do
        case "${words[$counter]}" in
        *)
            command=$(echo "${words[$counter]}" | sed 's/-/_/g')
            cpos=$counter
            (( cpos++ ))
            break
            ;;
        esac
        (( counter++ ))
    done

    local completions_func=_skopeo_${command}
    declare -F $completions_func >/dev/null && $completions_func

    return 0
}

complete -F _cli_bash_autocomplete $PROG
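To try the completions in the current shell, the file can be sourced directly; a minimal sketch, assuming the in-repo path above (packaged installations place it under the system bash-completion directory instead):

```bash
# Load the skopeo completions into the running shell.
source completions/bash/skopeo

# Then, for example:
#   skopeo co<Tab>        completes to "copy"
#   skopeo copy --s<Tab>  offers --sign-by, --src-creds, --src-cert-dir, ...
```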

14 default-policy.json Normal file
@@ -0,0 +1,14 @@
{
    "default": [
        {
            "type": "insecureAcceptAnything"
        }
    ],
    "transports":
        {
            "docker-daemon":
                {
                    "": [{"type":"insecureAcceptAnything"}]
                }
        }
}
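This default policy accepts any image. A stricter policy file can be substituted per invocation with the global --policy flag documented in the man page below; a sketch (the paths and image name are illustrative):

```bash
# Use a custom trust policy instead of the default one.
skopeo --policy ~/my-policy.json copy \
    docker://registry.example.com/app:latest \
    dir:/tmp/app
```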

26 default.yaml Normal file
@@ -0,0 +1,26 @@
# This is a default registries.d configuration file. You may
# add to this file or create additional files in registries.d/.
#
# sigstore: indicates a location that is read and write
# sigstore-staging: indicates a location that is only for write
#
# sigstore and sigstore-staging take a value of the following:
#   sigstore: {schema}://location
#
# For reading signatures, schema may be http, https, or file.
# For writing signatures, schema may only be file.

# This is the default signature write location for docker registries.
default-docker:
#  sigstore: file:///var/lib/atomic/sigstore
  sigstore-staging: file:///var/lib/atomic/sigstore

# The 'docker' indicator here is the start of the configuration
# for docker registries.
#
# docker:
#
#   privateregistry.com:
#     sigstore: http://privateregistry.com/sigstore/
#     sigstore-staging: /mnt/nfs/privateregistry/sigstore
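The registries.d directory consulted for this sigstore configuration can be overridden per invocation with the global --registries.d flag; a sketch, assuming a directory containing YAML files shaped like the one above:

```bash
# Point skopeo at a custom registries.d directory for signature storage lookup.
skopeo --registries.d ~/my-registries.d copy \
    docker://registry.example.com/app:latest \
    dir:/tmp/app
```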

@@ -1,113 +0,0 @@
package directory

import (
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "strings"

    "github.com/projectatomic/skopeo/types"
)

// manifestPath returns a path for the manifest within a directory using our conventions.
func manifestPath(dir string) string {
    return filepath.Join(dir, "manifest.json")
}

// layerPath returns a path for a layer tarball within a directory using our conventions.
func layerPath(dir string, digest string) string {
    // FIXME: Should we keep the digest identification?
    return filepath.Join(dir, strings.TrimPrefix(digest, "sha256:")+".tar")
}

// signaturePath returns a path for a signature within a directory using our conventions.
func signaturePath(dir string, index int) string {
    return filepath.Join(dir, fmt.Sprintf("signature-%d", index+1))
}

type dirImageDestination struct {
    dir string
}

// NewDirImageDestination returns an ImageDestination for writing to an existing directory.
func NewDirImageDestination(dir string) types.ImageDestination {
    return &dirImageDestination{dir}
}

func (d *dirImageDestination) CanonicalDockerReference() (string, error) {
    return "", fmt.Errorf("Can not determine canonical Docker reference for a local directory")
}

func (d *dirImageDestination) PutManifest(manifest []byte) error {
    return ioutil.WriteFile(manifestPath(d.dir), manifest, 0644)
}

func (d *dirImageDestination) PutLayer(digest string, stream io.Reader) error {
    layerFile, err := os.Create(layerPath(d.dir, digest))
    if err != nil {
        return err
    }
    defer layerFile.Close()
    if _, err := io.Copy(layerFile, stream); err != nil {
        return err
    }
    if err := layerFile.Sync(); err != nil {
        return err
    }
    return nil
}

func (d *dirImageDestination) PutSignatures(signatures [][]byte) error {
    for i, sig := range signatures {
        if err := ioutil.WriteFile(signaturePath(d.dir, i), sig, 0644); err != nil {
            return err
        }
    }
    return nil
}

type dirImageSource struct {
    dir string
}

// NewDirImageSource returns an ImageSource reading from an existing directory.
func NewDirImageSource(dir string) types.ImageSource {
    return &dirImageSource{dir}
}

// IntendedDockerReference returns the full, unambiguous, Docker reference for this image, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
// May be "" if unknown.
func (s *dirImageSource) IntendedDockerReference() string {
    return ""
}

// it's up to the caller to determine the MIME type of the returned manifest's bytes
func (s *dirImageSource) GetManifest(_ []string) ([]byte, string, error) {
    m, err := ioutil.ReadFile(manifestPath(s.dir))
    if err != nil {
        return nil, "", err
    }
    return m, "", err
}

func (s *dirImageSource) GetLayer(digest string) (io.ReadCloser, error) {
    return os.Open(layerPath(s.dir, digest))
}

func (s *dirImageSource) GetSignatures() ([][]byte, error) {
    signatures := [][]byte{}
    for i := 0; ; i++ {
        signature, err := ioutil.ReadFile(signaturePath(s.dir, i))
        if err != nil {
            if os.IsNotExist(err) {
                break
            }
            return nil, err
        }
        signatures = append(signatures, signature)
    }
    return signatures, nil
}

24 doc.go
@@ -1,24 +0,0 @@
// Package skopeo provides libraries and commands to interact with container images.
//
//    package main
//
//    import (
//        "fmt"
//
//        "github.com/projectatomic/skopeo/docker"
//    )
//
//    func main() {
//        img, err := docker.NewDockerImage("fedora", "", false)
//        if err != nil {
//            panic(err)
//        }
//        b, err := img.Manifest()
//        if err != nil {
//            panic(err)
//        }
//        fmt.Printf("%s", string(b))
//    }
//
// TODO(runcom)
package skopeo

@@ -1,362 +0,0 @@
package docker

import (
    "crypto/tls"
    "encoding/base64"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "os"
    "path/filepath"
    "strings"

    "github.com/Sirupsen/logrus"
    "github.com/docker/docker/pkg/homedir"
)

const (
    dockerHostname     = "docker.io"
    dockerRegistry     = "registry-1.docker.io"
    dockerAuthRegistry = "https://index.docker.io/v1/"

    dockerCfg         = ".docker"
    dockerCfgFileName = "config.json"
    dockerCfgObsolete = ".dockercfg"

    baseURL       = "%s://%s/v2/"
    tagsURL       = "%s/tags/list"
    manifestURL   = "%s/manifests/%s"
    blobsURL      = "%s/blobs/%s"
    blobUploadURL = "%s/blobs/uploads/?digest=%s"
)

// dockerClient is configuration for dealing with a single Docker registry.
type dockerClient struct {
    registry        string
    username        string
    password        string
    wwwAuthenticate string // Cache of a value set by ping() if scheme is not empty
    scheme          string // Cache of a value returned by a successful ping() if not empty
    transport       *http.Transport
}

// newDockerClient returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
func newDockerClient(refHostname, certPath string, tlsVerify bool) (*dockerClient, error) {
    var registry string
    if refHostname == dockerHostname {
        registry = dockerRegistry
    } else {
        registry = refHostname
    }
    username, password, err := getAuth(refHostname)
    if err != nil {
        return nil, err
    }
    var tr *http.Transport
    if certPath != "" || !tlsVerify {
        tlsc := &tls.Config{}

        if certPath != "" {
            cert, err := tls.LoadX509KeyPair(filepath.Join(certPath, "cert.pem"), filepath.Join(certPath, "key.pem"))
            if err != nil {
                return nil, fmt.Errorf("Error loading x509 key pair: %s", err)
            }
            tlsc.Certificates = append(tlsc.Certificates, cert)
        }
        tlsc.InsecureSkipVerify = !tlsVerify
        tr = &http.Transport{
            TLSClientConfig: tlsc,
        }
    }
    return &dockerClient{
        registry:  registry,
        username:  username,
        password:  password,
        transport: tr,
    }, nil
}

func (c *dockerClient) makeRequest(method, url string, headers map[string][]string, stream io.Reader) (*http.Response, error) {
    if c.scheme == "" {
        pr, err := c.ping()
        if err != nil {
            return nil, err
        }
        c.wwwAuthenticate = pr.WWWAuthenticate
        c.scheme = pr.scheme
    }

    url = fmt.Sprintf(baseURL, c.scheme, c.registry) + url
    req, err := http.NewRequest(method, url, stream)
    if err != nil {
        return nil, err
    }
    req.Header.Set("Docker-Distribution-API-Version", "registry/2.0")
    for n, h := range headers {
        for _, hh := range h {
            req.Header.Add(n, hh)
        }
    }
    if c.wwwAuthenticate != "" {
        if err := c.setupRequestAuth(req); err != nil {
            return nil, err
        }
    }
    client := &http.Client{}
    if c.transport != nil {
        client.Transport = c.transport
    }
    logrus.Debugf("%s %s", method, url)
    res, err := client.Do(req)
    if err != nil {
        return nil, err
    }
    return res, nil
}

func (c *dockerClient) setupRequestAuth(req *http.Request) error {
    tokens := strings.SplitN(strings.TrimSpace(c.wwwAuthenticate), " ", 2)
    if len(tokens) != 2 {
        return fmt.Errorf("expected 2 tokens in WWW-Authenticate: %d, %s", len(tokens), c.wwwAuthenticate)
    }
    switch tokens[0] {
    case "Basic":
        req.SetBasicAuth(c.username, c.password)
        return nil
    case "Bearer":
        client := &http.Client{}
        if c.transport != nil {
            client.Transport = c.transport
        }
        res, err := client.Do(req)
        if err != nil {
            return err
        }
        hdr := res.Header.Get("WWW-Authenticate")
        if hdr == "" || res.StatusCode != http.StatusUnauthorized {
            // no need for bearer? wtf?
            return nil
        }
        tokens = strings.Split(hdr, " ")
        tokens = strings.Split(tokens[1], ",")
        var realm, service, scope string
        for _, token := range tokens {
            if strings.HasPrefix(token, "realm") {
                realm = strings.Trim(token[len("realm="):], "\"")
            }
            if strings.HasPrefix(token, "service") {
                service = strings.Trim(token[len("service="):], "\"")
            }
            if strings.HasPrefix(token, "scope") {
                scope = strings.Trim(token[len("scope="):], "\"")
            }
        }

        if realm == "" {
            return fmt.Errorf("missing realm in bearer auth challenge")
        }
        if service == "" {
            return fmt.Errorf("missing service in bearer auth challenge")
        }
        // The scope can be empty if we're not getting a token for a specific repo
        //if scope == "" && repo != "" {
        if scope == "" {
            return fmt.Errorf("missing scope in bearer auth challenge")
        }
        token, err := c.getBearerToken(realm, service, scope)
        if err != nil {
            return err
        }
        req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
        return nil
    }
    return fmt.Errorf("no handler for %s authentication", tokens[0])
    // support docker bearer with authconfig's Auth string? see docker2aci
}

func (c *dockerClient) getBearerToken(realm, service, scope string) (string, error) {
    authReq, err := http.NewRequest("GET", realm, nil)
    if err != nil {
        return "", err
    }
    getParams := authReq.URL.Query()
    getParams.Add("service", service)
    if scope != "" {
        getParams.Add("scope", scope)
    }
    authReq.URL.RawQuery = getParams.Encode()
    if c.username != "" && c.password != "" {
        authReq.SetBasicAuth(c.username, c.password)
    }
    // insecure for now to contact the external token service
    tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
    client := &http.Client{Transport: tr}
    res, err := client.Do(authReq)
    if err != nil {
        return "", err
    }
    defer res.Body.Close()
    switch res.StatusCode {
    case http.StatusUnauthorized:
        return "", fmt.Errorf("unable to retrieve auth token: 401 unauthorized")
    case http.StatusOK:
        break
    default:
        return "", fmt.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL)
    }
    tokenBlob, err := ioutil.ReadAll(res.Body)
    if err != nil {
        return "", err
    }
    tokenStruct := struct {
        Token string `json:"token"`
    }{}
    if err := json.Unmarshal(tokenBlob, &tokenStruct); err != nil {
        return "", err
    }
    // TODO(runcom): reuse tokens?
    //hostAuthTokens, ok = rb.hostsV2AuthTokens[req.URL.Host]
    //if !ok {
    //hostAuthTokens = make(map[string]string)
    //rb.hostsV2AuthTokens[req.URL.Host] = hostAuthTokens
    //}
    //hostAuthTokens[repo] = tokenStruct.Token
    return tokenStruct.Token, nil
}
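The bearer flow above can be reproduced by hand against Docker Hub; an illustrative sketch, assuming `jq` is available for JSON extraction (the repository is a placeholder, and in the real flow the realm, service, and scope values come from the registry's WWW-Authenticate challenge rather than being hard-coded):

```bash
# Ask the token service named in the challenge for a pull-scoped token...
TOKEN=$(curl -s 'https://auth.docker.io/token?service=registry.docker.io&scope=repository:library/fedora:pull' | jq -r .token)

# ...then replay the original request with the token attached.
curl -s -H "Authorization: Bearer $TOKEN" \
    https://registry-1.docker.io/v2/library/fedora/manifests/latest
```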

func getAuth(hostname string) (string, string, error) {
    // TODO(runcom): get this from *cli.Context somehow
    //if username != "" && password != "" {
    //return username, password, nil
    //}
    if hostname == dockerHostname {
        hostname = dockerAuthRegistry
    }
    dockerCfgPath := filepath.Join(getDefaultConfigDir(".docker"), dockerCfgFileName)
    if _, err := os.Stat(dockerCfgPath); err == nil {
        j, err := ioutil.ReadFile(dockerCfgPath)
        if err != nil {
            return "", "", err
        }
        var dockerAuth dockerConfigFile
        if err := json.Unmarshal(j, &dockerAuth); err != nil {
            return "", "", err
        }
        // try the normal case
        if c, ok := dockerAuth.AuthConfigs[hostname]; ok {
            return decodeDockerAuth(c.Auth)
        }
    } else if os.IsNotExist(err) {
        oldDockerCfgPath := filepath.Join(getDefaultConfigDir(dockerCfgObsolete))
        if _, err := os.Stat(oldDockerCfgPath); err != nil {
            return "", "", nil //missing file is not an error
        }
        j, err := ioutil.ReadFile(oldDockerCfgPath)
        if err != nil {
            return "", "", err
        }
        var dockerAuthOld map[string]dockerAuthConfigObsolete
        if err := json.Unmarshal(j, &dockerAuthOld); err != nil {
            return "", "", err
        }
        if c, ok := dockerAuthOld[hostname]; ok {
            return decodeDockerAuth(c.Auth)
        }
    } else {
        // if file is there but we can't stat it for any reason other
        // than it doesn't exist then stop
        return "", "", fmt.Errorf("%s - %v", dockerCfgPath, err)
    }
    return "", "", nil
}

type apiErr struct {
    Code    string
    Message string
    Detail  interface{}
}

type pingResponse struct {
    WWWAuthenticate string
    APIVersion      string
    scheme          string
    errors          []apiErr
}

func (c *dockerClient) ping() (*pingResponse, error) {
    client := &http.Client{}
    if c.transport != nil {
        client.Transport = c.transport
    }
    ping := func(scheme string) (*pingResponse, error) {
        url := fmt.Sprintf(baseURL, scheme, c.registry)
        resp, err := client.Get(url)
        logrus.Debugf("Ping %s err %#v", url, err)
        if err != nil {
            return nil, err
        }
        defer resp.Body.Close()
        logrus.Debugf("Ping %s status %d", scheme+"://"+c.registry+"/v2/", resp.StatusCode)
        if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
            return nil, fmt.Errorf("error pinging repository, response code %d", resp.StatusCode)
        }
        pr := &pingResponse{}
        pr.WWWAuthenticate = resp.Header.Get("WWW-Authenticate")
        pr.APIVersion = resp.Header.Get("Docker-Distribution-Api-Version")
        pr.scheme = scheme
        if resp.StatusCode == http.StatusUnauthorized {
            type APIErrors struct {
                Errors []apiErr
            }
            errs := &APIErrors{}
            if err := json.NewDecoder(resp.Body).Decode(errs); err != nil {
                return nil, err
            }
            pr.errors = errs.Errors
        }
        return pr, nil
    }
    scheme := "https"
    pr, err := ping(scheme)
    if err != nil {
        scheme = "http"
        pr, err = ping(scheme)
        if err == nil {
            return pr, nil
        }
    }
    return pr, err
}
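The /v2/ ping that drives scheme and auth detection is easy to observe directly; a sketch against Docker Hub's registry endpoint (any Registry API v2 server responds similarly):

```bash
# A 401 with a WWW-Authenticate challenge still counts as a successful ping.
curl -si https://registry-1.docker.io/v2/ | grep -i -e '^HTTP' -e '^www-authenticate'
```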

func getDefaultConfigDir(confPath string) string {
    return filepath.Join(homedir.Get(), confPath)
}

type dockerAuthConfigObsolete struct {
    Auth string `json:"auth"`
}

type dockerAuthConfig struct {
    Auth string `json:"auth,omitempty"`
}

type dockerConfigFile struct {
    AuthConfigs map[string]dockerAuthConfig `json:"auths"`
}

func decodeDockerAuth(s string) (string, string, error) {
    decoded, err := base64.StdEncoding.DecodeString(s)
    if err != nil {
        return "", "", err
    }
    parts := strings.SplitN(string(decoded), ":", 2)
    if len(parts) != 2 {
        // if it's invalid just skip, as docker does
        return "", "", nil
    }
    user := parts[0]
    password := strings.Trim(parts[1], "\x00")
    return user, password, nil
}
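The auth values decoded here are plain base64 of username:password, which is what docker login writes into $HOME/.docker/config.json; an illustrative round trip with made-up credentials:

```bash
# Encode the way config.json stores credentials...
printf 'testuser:testpassword' | base64
# dGVzdHVzZXI6dGVzdHBhc3N3b3Jk

# ...and decode it back; decodeDockerAuth does exactly this before
# splitting on the first colon.
printf 'dGVzdHVzZXI6dGVzdHBhc3N3b3Jk' | base64 -d
```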

@@ -1,286 +0,0 @@
package docker

import (
    "encoding/json"
    "errors"
    "fmt"
    "io/ioutil"
    "net/http"
    "regexp"
    "strings"
    "time"

    "github.com/projectatomic/skopeo/directory"
    "github.com/projectatomic/skopeo/docker/utils"
    "github.com/projectatomic/skopeo/types"
)

var (
    validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
)

type dockerImage struct {
    src              *dockerImageSource
    cachedManifest   []byte   // Private cache for Manifest(); nil if not yet known.
    cachedSignatures [][]byte // Private cache for Signatures(); nil if not yet known.
}

// NewDockerImage returns a new Image interface type after setting up
// a client to the registry hosting the given image.
func NewDockerImage(img, certPath string, tlsVerify bool) (types.Image, error) {
    s, err := newDockerImageSource(img, certPath, tlsVerify)
    if err != nil {
        return nil, err
    }
    return &dockerImage{src: s}, nil
}

// IntendedDockerReference returns the full, unambiguous, Docker reference for this image, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
// May be "" if unknown.
func (i *dockerImage) IntendedDockerReference() string {
    return i.src.IntendedDockerReference()
}

// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
func (i *dockerImage) Manifest() ([]byte, error) {
    if i.cachedManifest == nil {
        m, _, err := i.src.GetManifest([]string{utils.DockerV2Schema1MIMEType})
        if err != nil {
            return nil, err
        }
        i.cachedManifest = m
    }
    return i.cachedManifest, nil
}

// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
func (i *dockerImage) Signatures() ([][]byte, error) {
    if i.cachedSignatures == nil {
        sigs, err := i.src.GetSignatures()
        if err != nil {
            return nil, err
        }
        i.cachedSignatures = sigs
    }
    return i.cachedSignatures, nil
}

func (i *dockerImage) Inspect() (*types.ImageInspectInfo, error) {
    // TODO(runcom): unused version param for now, default to docker v2-1
    m, err := i.getSchema1Manifest()
    if err != nil {
        return nil, err
    }
    ms1, ok := m.(*manifestSchema1)
    if !ok {
        return nil, fmt.Errorf("error retrieving manifest schema1")
    }
    v1 := &v1Image{}
    if err := json.Unmarshal([]byte(ms1.History[0].V1Compatibility), v1); err != nil {
        return nil, err
    }
    return &types.ImageInspectInfo{
        Name:          i.src.ref.FullName(),
        Tag:           ms1.Tag,
        DockerVersion: v1.DockerVersion,
        Created:       v1.Created,
        Labels:        v1.Config.Labels,
        Architecture:  v1.Architecture,
        Os:            v1.OS,
        Layers:        ms1.GetLayers(),
    }, nil
}

// GetRepositoryTags lists all tags available in the repository. Note that this has no connection with the tag(s) used for this specific image, if any.
func (i *dockerImage) GetRepositoryTags() ([]string, error) {
    // FIXME? Breaking the abstraction.
    url := fmt.Sprintf(tagsURL, i.src.ref.RemoteName())
    res, err := i.src.c.makeRequest("GET", url, nil, nil)
    if err != nil {
        return nil, err
    }
    defer res.Body.Close()
    if res.StatusCode != http.StatusOK {
        // print url also
        return nil, fmt.Errorf("Invalid status code returned when fetching tags list %d", res.StatusCode)
    }
    type tagsRes struct {
        Tags []string
    }
    tags := &tagsRes{}
    if err := json.NewDecoder(res.Body).Decode(tags); err != nil {
        return nil, err
    }
    return tags.Tags, nil
}

type config struct {
    Labels map[string]string
}

type v1Image struct {
    // Config is the configuration of the container received from the client
    Config *config `json:"config,omitempty"`
    // DockerVersion specifies the version of Docker with which the image was built
    DockerVersion string `json:"docker_version,omitempty"`
    // Created is the timestamp at which the image was created
    Created time.Time `json:"created"`
    // Architecture is the hardware that the image is built and runs on
    Architecture string `json:"architecture,omitempty"`
    // OS is the operating system used to build and run the image
    OS string `json:"os,omitempty"`
}

// TODO(runcom)
func (i *dockerImage) DockerTar() ([]byte, error) {
    return nil, nil
}

// will support v1 one day...
type manifest interface {
    String() string
    GetLayers() []string
}

type manifestSchema1 struct {
    Name     string
    Tag      string
    FSLayers []struct {
        BlobSum string `json:"blobSum"`
    } `json:"fsLayers"`
    History []struct {
        V1Compatibility string `json:"v1Compatibility"`
    } `json:"history"`
    // TODO(runcom) verify the downloaded manifest
    //Signature []byte `json:"signature"`
}

func (m *manifestSchema1) GetLayers() []string {
    layers := make([]string, len(m.FSLayers))
    for i, layer := range m.FSLayers {
        layers[i] = layer.BlobSum
    }
    return layers
}

func (m *manifestSchema1) String() string {
    return fmt.Sprintf("%s-%s", sanitize(m.Name), sanitize(m.Tag))
}

func sanitize(s string) string {
    return strings.Replace(s, "/", "-", -1)
}

func (i *dockerImage) getSchema1Manifest() (manifest, error) {
    manblob, err := i.Manifest()
    if err != nil {
        return nil, err
    }
    mschema1 := &manifestSchema1{}
    if err := json.Unmarshal(manblob, mschema1); err != nil {
        return nil, err
    }
    if err := fixManifestLayers(mschema1); err != nil {
        return nil, err
    }
    // TODO(runcom): verify manifest schema 1, 2 etc
    //if len(m.FSLayers) != len(m.History) {
    //return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String())
    //}
    //if len(m.FSLayers) == 0 {
    //return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
    //}
    return mschema1, nil
}

func (i *dockerImage) Layers(layers ...string) error {
    m, err := i.getSchema1Manifest()
    if err != nil {
        return err
    }
    tmpDir, err := ioutil.TempDir(".", "layers-"+m.String()+"-")
    if err != nil {
        return err
    }
    dest := directory.NewDirImageDestination(tmpDir)
    data, err := json.Marshal(m)
    if err != nil {
        return err
    }
    if err := dest.PutManifest(data); err != nil {
        return err
    }
    if len(layers) == 0 {
        layers = m.GetLayers()
    }
    for _, l := range layers {
        if !strings.HasPrefix(l, "sha256:") {
            l = "sha256:" + l
        }
        if err := i.getLayer(dest, l); err != nil {
            return err
        }
    }
    return nil
}

func (i *dockerImage) getLayer(dest types.ImageDestination, digest string) error {
    stream, err := i.src.GetLayer(digest)
    if err != nil {
        return err
    }
    defer stream.Close()
    return dest.PutLayer(digest, stream)
}

func fixManifestLayers(manifest *manifestSchema1) error {
    type imageV1 struct {
        ID     string
        Parent string
    }
    imgs := make([]*imageV1, len(manifest.FSLayers))
    for i := range manifest.FSLayers {
        img := &imageV1{}

        if err := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), img); err != nil {
            return err
        }

        imgs[i] = img
        if err := validateV1ID(img.ID); err != nil {
            return err
        }
    }
    if imgs[len(imgs)-1].Parent != "" {
        return errors.New("Invalid parent ID in the base layer of the image.")
    }
    // check general duplicates to error instead of a deadlock
    idmap := make(map[string]struct{})
    var lastID string
    for _, img := range imgs {
        // skip IDs that appear after each other, we handle those later
        if _, exists := idmap[img.ID]; img.ID != lastID && exists {
            return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
        }
        lastID = img.ID
        idmap[lastID] = struct{}{}
    }
    // backwards loop so that we keep the remaining indexes after removing items
    for i := len(imgs) - 2; i >= 0; i-- {
        if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
            manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...)
            manifest.History = append(manifest.History[:i], manifest.History[i+1:]...)
        } else if imgs[i].Parent != imgs[i+1].ID {
            return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent)
        }
    }
    return nil
}

func validateV1ID(id string) error {
    if ok := validHex.MatchString(id); !ok {
        return fmt.Errorf("image ID %q is invalid", id)
    }
    return nil
}

@@ -1,110 +0,0 @@
package docker

import (
    "bytes"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"

    "github.com/Sirupsen/logrus"
    "github.com/projectatomic/skopeo/docker/utils"
    "github.com/projectatomic/skopeo/reference"
    "github.com/projectatomic/skopeo/types"
)

type dockerImageDestination struct {
    ref reference.Named
    tag string
    c   *dockerClient
}

// NewDockerImageDestination creates a new ImageDestination for the specified image and connection specification.
func NewDockerImageDestination(img, certPath string, tlsVerify bool) (types.ImageDestination, error) {
    ref, tag, err := parseDockerImageName(img)
    if err != nil {
        return nil, err
    }
    c, err := newDockerClient(ref.Hostname(), certPath, tlsVerify)
    if err != nil {
        return nil, err
    }
    return &dockerImageDestination{
        ref: ref,
        tag: tag,
        c:   c,
    }, nil
}

func (d *dockerImageDestination) CanonicalDockerReference() (string, error) {
    return fmt.Sprintf("%s:%s", d.ref.Name(), d.tag), nil
}

func (d *dockerImageDestination) PutManifest(manifest []byte) error {
    // FIXME: This only allows upload by digest, not creating a tag. See the
    // corresponding comment in NewOpenshiftImageDestination.
    digest, err := utils.ManifestDigest(manifest)
    if err != nil {
        return err
    }
    url := fmt.Sprintf(manifestURL, d.ref.RemoteName(), digest)

    headers := map[string][]string{}
    mimeType := utils.GuessManifestMIMEType(manifest)
    if mimeType != "" {
        headers["Content-Type"] = []string{mimeType}
    }
    res, err := d.c.makeRequest("PUT", url, headers, bytes.NewReader(manifest))
    if err != nil {
        return err
    }
    defer res.Body.Close()
    if res.StatusCode != http.StatusCreated {
        body, err := ioutil.ReadAll(res.Body)
        if err == nil {
            logrus.Debugf("Error body %s", string(body))
        }
        logrus.Debugf("Error uploading manifest, status %d, %#v", res.StatusCode, res)
        return fmt.Errorf("Error uploading manifest to %s, status %d", url, res.StatusCode)
    }
    return nil
}

func (d *dockerImageDestination) PutLayer(digest string, stream io.Reader) error {
    checkURL := fmt.Sprintf(blobsURL, d.ref.RemoteName(), digest)

    logrus.Debugf("Checking %s", checkURL)
    res, err := d.c.makeRequest("HEAD", checkURL, nil, nil)
    if err != nil {
        return err
    }
    defer res.Body.Close()
    if res.StatusCode == http.StatusOK && res.Header.Get("Docker-Content-Digest") == digest {
        logrus.Debugf("... already exists, not uploading")
        return nil
    }
    logrus.Debugf("... failed, status %d", res.StatusCode)

    // FIXME? Chunked upload, progress reporting, etc.
    uploadURL := fmt.Sprintf(blobUploadURL, d.ref.RemoteName(), digest)
    logrus.Debugf("Uploading %s", uploadURL)
    // FIXME: Set Content-Length?
    res, err = d.c.makeRequest("POST", uploadURL, map[string][]string{"Content-Type": {"application/octet-stream"}}, stream)
    if err != nil {
        return err
    }
    defer res.Body.Close()
    if res.StatusCode != http.StatusCreated {
        logrus.Debugf("Error uploading, status %d", res.StatusCode)
        return fmt.Errorf("Error uploading to %s, status %d", uploadURL, res.StatusCode)
    }

    return nil
}

func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
    if len(signatures) != 0 {
        return fmt.Errorf("Pushing signatures to a Docker Registry is not supported")
    }
    return nil
}

@@ -1,96 +0,0 @@
package docker

import (
    "fmt"
    "io"
    "io/ioutil"
    "net/http"

    "github.com/Sirupsen/logrus"
    "github.com/projectatomic/skopeo/reference"
    "github.com/projectatomic/skopeo/types"
)

type errFetchManifest struct {
    statusCode int
    body       []byte
}

func (e errFetchManifest) Error() string {
    return fmt.Sprintf("error fetching manifest: status code: %d, body: %s", e.statusCode, string(e.body))
}

type dockerImageSource struct {
    ref reference.Named
    tag string
    c   *dockerClient
}

// newDockerImageSource is the same as NewDockerImageSource, only it returns the more specific *dockerImageSource type.
func newDockerImageSource(img, certPath string, tlsVerify bool) (*dockerImageSource, error) {
    ref, tag, err := parseDockerImageName(img)
    if err != nil {
        return nil, err
    }
    c, err := newDockerClient(ref.Hostname(), certPath, tlsVerify)
    if err != nil {
        return nil, err
    }
    return &dockerImageSource{
        ref: ref,
        tag: tag,
        c:   c,
    }, nil
}

// NewDockerImageSource creates a new ImageSource for the specified image and connection specification.
func NewDockerImageSource(img, certPath string, tlsVerify bool) (types.ImageSource, error) {
    return newDockerImageSource(img, certPath, tlsVerify)
}

// IntendedDockerReference returns the full, unambiguous, Docker reference for this image, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
// May be "" if unknown.
func (s *dockerImageSource) IntendedDockerReference() string {
    return fmt.Sprintf("%s:%s", s.ref.Name(), s.tag)
}

func (s *dockerImageSource) GetManifest(mimetypes []string) ([]byte, string, error) {
    url := fmt.Sprintf(manifestURL, s.ref.RemoteName(), s.tag)
    // TODO(runcom) set manifest version header! schema1 for now - then schema2 etc etc and v1
    // TODO(runcom) NO, switch on the resulting manifest like Docker is doing
    headers := make(map[string][]string)
    headers["Accept"] = mimetypes
    res, err := s.c.makeRequest("GET", url, headers, nil)
    if err != nil {
        return nil, "", err
    }
    defer res.Body.Close()
    manblob, err := ioutil.ReadAll(res.Body)
    if err != nil {
        return nil, "", err
    }
    if res.StatusCode != http.StatusOK {
        return nil, "", errFetchManifest{res.StatusCode, manblob}
    }
    // We might validate manblob against the Docker-Content-Digest header here to protect against transport errors.
    return manblob, res.Header.Get("Content-Type"), nil
}

func (s *dockerImageSource) GetLayer(digest string) (io.ReadCloser, error) {
    url := fmt.Sprintf(blobsURL, s.ref.RemoteName(), digest)
    logrus.Infof("Downloading %s", url)
    res, err := s.c.makeRequest("GET", url, nil, nil)
    if err != nil {
        return nil, err
    }
    if res.StatusCode != http.StatusOK {
        // print url also
        return nil, fmt.Errorf("Invalid status code returned when fetching blob %d", res.StatusCode)
    }
    return res.Body, nil
}

func (s *dockerImageSource) GetSignatures() ([][]byte, error) {
    return [][]byte{}, nil
}

@@ -1,20 +0,0 @@
package docker

import "github.com/projectatomic/skopeo/reference"

// parseDockerImageName converts a string into a reference and tag value.
func parseDockerImageName(img string) (reference.Named, string, error) {
    ref, err := reference.ParseNamed(img)
    if err != nil {
        return nil, "", err
    }
    ref = reference.WithDefaultTag(ref)
    var tag string
    switch x := ref.(type) {
    case reference.Canonical:
        tag = x.Digest().String()
    case reference.NamedTagged:
        tag = x.Tag()
    }
    return ref, tag, nil
}
Binary file not shown.
@@ -1,5 +0,0 @@
{
    "schemaVersion": 99999,
    "name": "mitr/noversion-nonsense",
    "tag": "latest"
}

@@ -1,56 +0,0 @@
{
  "schemaVersion": 2,
  "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
  "manifests": [
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
      "size": 2094,
      "digest": "sha256:7820f9a86d4ad15a2c4f0c0e5479298df2aa7c2f6871288e2ef8546f3e7b6783",
      "platform": {
        "architecture": "ppc64le",
        "os": "linux"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
      "size": 1922,
      "digest": "sha256:ae1b0e06e8ade3a11267564a26e750585ba2259c0ecab59ab165ad1af41d1bdd",
      "platform": {
        "architecture": "amd64",
        "os": "linux",
        "features": [
          "sse"
        ]
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
      "size": 2084,
      "digest": "sha256:e4c0df75810b953d6717b8f8f28298d73870e8aa2a0d5e77b8391f16fdfbbbe2",
      "platform": {
        "architecture": "s390x",
        "os": "linux"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
      "size": 2084,
      "digest": "sha256:07ebe243465ef4a667b78154ae6c3ea46fdb1582936aac3ac899ea311a701b40",
      "platform": {
        "architecture": "arm",
        "os": "linux",
        "variant": "armv7"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
      "size": 2090,
      "digest": "sha256:fb2fc0707b86dafa9959fe3d29e66af8787aee4d9a23581714be65db4265ad8a",
      "platform": {
        "architecture": "arm64",
        "os": "linux",
        "variant": "armv8"
      }
    }
  ]
}

@@ -1,44 +0,0 @@
{
  "schemaVersion": 1,
  "name": "mitr/buxybox",
  "tag": "latest",
  "architecture": "amd64",
  "fsLayers": [
    {
      "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
    },
    {
      "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
    },
    {
      "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
    }
  ],
  "history": [
    {
      "v1Compatibility": "{\"id\":\"f1b5eb0a1215f663765d509b6cdf3841bc2bcff0922346abb943d1342d469a97\",\"parent\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"created\":\"2016-03-03T11:29:44.222098366Z\",\"container\":\"c0924f5b281a1992127d0afc065e59548ded8880b08aea4debd56d4497acb17a\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) LABEL Checksum=4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\"],\"Image\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Checksum\":\"4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\",\"Name\":\"atomic-test-2\"}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Checksum\":\"4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\",\"Name\":\"atomic-test-2\"}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
    },
    {
      "v1Compatibility": "{\"id\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"parent\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"created\":\"2016-03-03T11:29:38.563048924Z\",\"container\":\"fd4cf54dcd239fbae9bdade9db48e41880b436d27cb5313f60952a46ab04deff\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) LABEL Name=atomic-test-2\"],\"Image\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Name\":\"atomic-test-2\"}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Name\":\"atomic-test-2\"}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
    },
    {
      "v1Compatibility": "{\"id\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"created\":\"2016-03-03T11:29:32.948089874Z\",\"container\":\"56f0fe1dfc95755dd6cda10f7215c9937a8d9c6348d079c581a261fd4c2f3a5f\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) MAINTAINER \\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
    }
  ],
  "signatures": [
    {
      "header": {
        "jwk": {
          "crv": "P-256",
          "kid": "OZ45:U3IG:TDOI:PMBD:NGP2:LDIW:II2U:PSBI:MMCZ:YZUP:TUUO:XPZT",
          "kty": "EC",
          "x": "ReC5c0J9tgXSdUL4_xzEt5RsD8kFt2wWSgJcpAcOQx8",
          "y": "3sBGEqQ3ZMeqPKwQBAadN2toOUEASha18xa0WwsDF-M"
        },
        "alg": "ES256"
      },
      "signature": "dV1paJ3Ck1Ph4FcEhg_frjqxdlGdI6-ywRamk6CvMOcaOEUdCWCpCPQeBQpD2N6tGjkoG1BbstkFNflllfenCw",
      "protected": "eyJmb3JtYXRMZW5ndGgiOjU0NzgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNi0wNC0xOFQyMDo1NDo0MloifQ"
    }
  ]
}

@@ -1,26 +0,0 @@
{
  "schemaVersion": 2,
  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
  "config": {
    "mediaType": "application/vnd.docker.container.image.v1+json",
    "size": 7023,
    "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
  },
  "layers": [
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 32654,
      "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 16724,
      "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 73109,
      "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
    }
  ]
}

@@ -1,10 +0,0 @@
{
  "schemaVersion": 2,
  "config": {
    "mediaType": "application/vnd.docker.container.image.v1+json",
    "size": 7023,
    "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
  },
  "layers": [
  ]
}

@@ -1,8 +0,0 @@
package utils

const (
    // TestV2S2ManifestDigest is the Docker manifest digest of "v2s2.manifest.json"
    TestV2S2ManifestDigest = "sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55"
    // TestV2S1ManifestDigest is the Docker manifest digest of "v2s1.manifest.json"
    TestV2S1ManifestDigest = "sha256:077594da70fc17ec2c93cfa4e6ed1fcc26992851fb2c71861338aaf4aa9e41b1"
)

@@ -1,75 +0,0 @@
package utils

import (
    "crypto/sha256"
    "encoding/hex"
    "encoding/json"

    "github.com/docker/libtrust"
)

// FIXME: Should we just use docker/distribution and docker/docker implementations directly?

// ManifestMIMETypes returns a slice of supported MIME types
func ManifestMIMETypes() []string {
    return []string{
        DockerV2Schema1MIMEType,
        DockerV2Schema2MIMEType,
        DockerV2ListMIMEType,
    }
}

const (
    // DockerV2Schema1MIMEType MIME type represents Docker manifest schema 1
    DockerV2Schema1MIMEType = "application/vnd.docker.distribution.manifest.v1+json"
    // DockerV2Schema2MIMEType MIME type represents Docker manifest schema 2
    DockerV2Schema2MIMEType = "application/vnd.docker.distribution.manifest.v2+json"
    // DockerV2ListMIMEType MIME type represents Docker manifest schema 2 list
    DockerV2ListMIMEType = "application/vnd.docker.distribution.manifest.list.v2+json"
)

// GuessManifestMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
// but we may not have such metadata available (e.g. when the manifest is a local file).
func GuessManifestMIMEType(manifest []byte) string {
    // A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
    // Also docker/distribution/manifest.Versioned.
    meta := struct {
        MediaType     string `json:"mediaType"`
        SchemaVersion int    `json:"schemaVersion"`
    }{}
    if err := json.Unmarshal(manifest, &meta); err != nil {
        return ""
    }

    switch meta.MediaType {
    case DockerV2Schema2MIMEType, DockerV2ListMIMEType: // A recognized type.
        return meta.MediaType
    }
    switch meta.SchemaVersion {
    case 1:
        return DockerV2Schema1MIMEType
    case 2: // Really should not happen, meta.MediaType should have been set. But given the data, this is our best guess.
        return DockerV2Schema2MIMEType
    }
    return ""
}

// ManifestDigest returns a digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
func ManifestDigest(manifest []byte) (string, error) {
    if GuessManifestMIMEType(manifest) == DockerV2Schema1MIMEType {
        sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
        if err != nil {
            return "", err
        }
        manifest, err = sig.Payload()
        if err != nil {
            // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
            // that libtrust itself has josebase64UrlEncode()d
            return "", err
        }
    }

    hash := sha256.Sum256(manifest)
    return "sha256:" + hex.EncodeToString(hash[:]), nil
}
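This digest is what the skopeo manifest-digest subcommand (listed in the completion script above) exposes on the command line; an illustrative use against the v2s2 fixture referenced by the tests below, whose expected value is the TestV2S2ManifestDigest constant:

```bash
skopeo manifest-digest fixtures/v2s2.manifest.json
# sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55
```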

@@ -1,58 +0,0 @@
package utils

import (
    "io/ioutil"
    "path/filepath"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestGuessManifestMIMEType(t *testing.T) {
    cases := []struct {
        path     string
        mimeType string
    }{
        {"v2s2.manifest.json", DockerV2Schema2MIMEType},
        {"v2list.manifest.json", DockerV2ListMIMEType},
        {"v2s1.manifest.json", DockerV2Schema1MIMEType},
        {"v2s1-invalid-signatures.manifest.json", DockerV2Schema1MIMEType},
        {"v2s2nomime.manifest.json", DockerV2Schema2MIMEType}, // It is unclear whether this one is legal, but we should guess v2s2 if anything at all.
        {"unknown-version.manifest.json", ""},
        {"non-json.manifest.json", ""}, // Not a manifest (nor JSON) at all
    }

    for _, c := range cases {
        manifest, err := ioutil.ReadFile(filepath.Join("fixtures", c.path))
        require.NoError(t, err)
        mimeType := GuessManifestMIMEType(manifest)
        assert.Equal(t, c.mimeType, mimeType)
    }
}

func TestManifestDigest(t *testing.T) {
    cases := []struct {
        path   string
        digest string
    }{
        {"v2s2.manifest.json", TestV2S2ManifestDigest},
        {"v2s1.manifest.json", TestV2S1ManifestDigest},
    }
    for _, c := range cases {
        manifest, err := ioutil.ReadFile(filepath.Join("fixtures", c.path))
        require.NoError(t, err)
        digest, err := ManifestDigest(manifest)
        require.NoError(t, err)
        assert.Equal(t, c.digest, digest)
    }

    manifest, err := ioutil.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json")
    require.NoError(t, err)
    digest, err := ManifestDigest(manifest)
    assert.Error(t, err)

    digest, err = ManifestDigest([]byte{})
    require.NoError(t, err)
    assert.Equal(t, "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", digest)
}

246 docs/skopeo.1.md Normal file
@@ -0,0 +1,246 @@
% SKOPEO(1) Skopeo Man Pages
% Jhon Honce
% August 2016
# NAME
skopeo -- Various operations with container images and container image registries
# SYNOPSIS
**skopeo** [_global options_] _command_ [_command options_]
# DESCRIPTION
`skopeo` is a command line utility providing various operations with container images and container image registries. For example, it can inspect a repository on a container registry and fetch images. It fetches the repository's manifest and can show you a `docker inspect`-like JSON output about a whole repository or a tag. In contrast to `docker inspect`, this tool helps you gather useful information about a repository or a tag without requiring you to run `docker pull` - e.g. which tags are available for the given repository, or which labels the image has.

It also allows you to copy container images between various registries, possibly converting them as necessary, and to sign and verify images.
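For example, inspecting an image straight from a registry, without pulling it (the image name is only an example):

```bash
skopeo inspect docker://docker.io/library/fedora:latest
```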

## IMAGE NAMES
Most commands refer to container images, using a _transport_`:`_details_ format. The following formats are supported (concrete examples follow the list):

**atomic:**_hostname_**/**_namespace_**/**_stream_**:**_tag_
An image served by an OpenShift(Atomic) Registry server. The current OpenShift project and OpenShift Registry instance are by default read from `$HOME/.kube/config`, which is set e.g. using `(oc login)`.

**containers-storage://**_docker-reference_
An image located in a local containers/storage image store. The location and image store are specified in /etc/containers/storage.conf

**dir:**_path_
An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.

**docker://**_docker-reference_
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$HOME/.docker/config.json`, which is set e.g. using `(docker login)`.

**docker-archive:**_path_[**:**_docker-reference_]
An image stored in a `docker save` formatted file. _docker-reference_ is only used when creating such a file, and it must not contain a digest.

**docker-daemon:**_docker-reference_
An image _docker-reference_ stored in the docker daemon internal storage. _docker-reference_ must contain either a tag or a digest. Alternatively, when reading images, the format can also be docker-daemon:algo:digest (an image ID).

**oci:**_path_**:**_tag_
An image _tag_ in a directory compliant with "Open Container Image Layout Specification" at _path_.

**ostree:**_image_[**@**_/absolute/repo/path_]
An image in a local OSTree repository. _/absolute/repo/path_ defaults to _/ostree/repo_.
|
||||
|
||||
# OPTIONS
|
||||
|
||||
**--debug** enable debug output
|
||||
|
||||
**--policy** _path-to-policy_ Path to a policy.json file to use for verifying signatures and deciding whether an image is trusted, overriding the default trust policy file.
|
||||
|
||||
**--insecure-policy** Adopt an insecure, permissive policy that allows anything. This obviates the need for a policy file.
|
||||
|
||||
**--registries.d** _dir_ use registry configuration files in _dir_ (e.g. for container signature storage), overriding the default path.
|
||||
|
||||
**--help**|**-h** Show help
|
||||
|
||||
**--version**|**-v** print the version number
|
||||
|
||||
# COMMANDS
|
||||
|
||||
## skopeo copy
|
||||
**skopeo copy** [**--sign-by=**_key-ID_] _source-image destination-image_
|
||||
|
||||
Copy an image (manifest, filesystem layers, signatures) from one location to another.
|
||||
|
||||
Uses the system's trust policy to validate images, rejects images not trusted by the policy.
|
||||
|
||||
_source-image_ use the "image name" format described above
|
||||
|
||||
_destination-image_ use the "image name" format described above
|
||||
|
||||
**--remove-signatures** do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.
|
||||
|
||||
**--sign-by=**_key-id_ add a signature using that key ID for an image name corresponding to _destination-image_
|
||||
|
||||
**--src-creds** _username[:password]_ for accessing the source registry
|
||||
|
||||
**--dest-creds** _username[:password]_ for accessing the destination registry
|
||||
|
||||
**--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry
|
||||
|
||||
**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container source registry (defaults to true)
|
||||
|
||||
**--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry
|
||||
|
||||
**--dest-ostree-tmp-dir** _path_ Directory to use for OSTree temporary files.
|
||||
|
||||
**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container destination registry (defaults to true)
|
||||
|
||||
Existing signatures, if any, are preserved as well.
|
||||
|
||||
## skopeo delete
|
||||
**skopeo delete** _image-name_
|
||||
|
||||
Mark _image-name_ for deletion. To release the allocated disk space, you need to execute the container registry garabage collector. E.g.,
|
||||
|
||||
```sh
|
||||
$ docker exec -it registry bin/registry garbage-collect /etc/docker/registry/config.yml
|
||||
```
|
||||
|
||||
**--creds** _username[:password]_ for accessing the registry
|
||||
|
||||
**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry
|
||||
|
||||
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
|
||||
|
||||
Additionally, the registry must allow deletions by setting `REGISTRY_STORAGE_DELETE_ENABLED=true` for the registry daemon.
|
||||
|
||||
## skopeo inspect
|
||||
**skopeo inspect** [**--raw**] _image-name_
|
||||
|
||||
Return low-level information about _image-name_ in a registry
|
||||
|
||||
**--raw** output raw manifest, default is to format in JSON
|
||||
|
||||
_image-name_ name of image to retrieve information about
|
||||
|
||||
**--creds** _username[:password]_ for accessing the registry
|
||||
|
||||
**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry
|
||||
|
||||
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
|
||||
|
||||
## skopeo manifest-digest
|
||||
**skopeo manifest-digest** _manifest-file_
|
||||
|
||||
Compute a manifest digest of _manifest-file_ and write it to standard output.
|
||||
|
||||
## skopeo standalone-sign
|
||||
**skopeo standalone-sign** _manifest docker-reference key-fingerprint_ **--output**|**-o** _signature_
|
||||
|
||||
This is primarily a debugging tool, or useful for special cases,
|
||||
and usually should not be a part of your normal operational workflow; use `skopeo copy --sign-by` instead to publish and sign an image in one step.
|
||||
|
||||
_manifest_ Path to a file containing the image manifest
|
||||
|
||||
_docker-reference_ A docker reference to identify the image with
|
||||
|
||||
_key-fingerprint_ Key identity to use for signing
|
||||
|
||||
**--output**|**-o** output file
|
||||
|
||||
## skopeo standalone-verify
|
||||
**skopeo standalone-verify** _manifest docker-reference key-fingerprint signature_
|
||||
|
||||
Verify a signature using local files, digest will be printed on success.
|
||||
|
||||
_manifest_ Path to a file containing the image manifest
|
||||
|
||||
_docker-reference_ A docker reference expected to identify the image in the signature
|
||||
|
||||
_key-fingerprint_ Expected identity of the signing key
|
||||
|
||||
_signature_ Path to signature file
|
||||
|
||||
**Note:** If you do use this, make sure that the image can not be changed at the source location between the times of its verification and use.
|
||||
|
||||
## skopeo help
|
||||
show help for `skopeo`
|
||||
|
||||
# FILES
|
||||
**/etc/containers/policy.json**
|
||||
Default trust policy file, if **--policy** is not specified.
|
||||
The policy format is documented in https://github.com/containers/image/blob/master/docs/policy.json.md .
|
||||
|
||||
**/etc/containers/registries.d**
|
||||
Default directory containing registry configuration, if **--registries.d** is not specified.
|
||||
The contents of this directory are documented in https://github.com/containers/image/blob/master/docs/registries.d.md .
|
||||
|
||||
# EXAMPLES
|
||||
|
||||
## skopeo copy
|
||||
To copy the layers of the docker.io busybox image to a local directory:
|
||||
```sh
|
||||
$ mkdir -p /var/lib/images/busybox
|
||||
$ skopeo copy docker://busybox:latest dir:/var/lib/images/busybox
|
||||
$ ls /var/lib/images/busybox/*
|
||||
/tmp/busybox/2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749.tar
|
||||
/tmp/busybox/manifest.json
|
||||
/tmp/busybox/8ddc19f16526912237dd8af81971d5e4dd0587907234be2b83e249518d5b673f.tar
|
||||
```
|
||||
|
||||
To copy and sign an image:
|
||||
|
||||
```sh
|
||||
$ skopeo copy --sign-by dev@example.com atomic:example/busybox:streaming atomic:example/busybox:gold
|
||||
```
|
||||
## skopeo delete
|
||||
Mark image example/pause for deletion from the registry.example.com registry:
|
||||
```sh
|
||||
$ skopeo delete --force docker://registry.example.com/example/pause:latest
|
||||
```
|
||||
See above for additional details on using the command **delete**.
|
||||
|
||||
## skopeo inspect
|
||||
To review information for the image fedora from the docker.io registry:
|
||||
```sh
|
||||
$ skopeo inspect docker://docker.io/fedora
|
||||
{
|
||||
"Name": "docker.io/library/fedora",
|
||||
"Digest": "sha256:a97914edb6ba15deb5c5acf87bd6bd5b6b0408c96f48a5cbd450b5b04509bb7d",
|
||||
"RepoTags": [
|
||||
"20",
|
||||
"21",
|
||||
"22",
|
||||
"23",
|
||||
"24",
|
||||
"heisenbug",
|
||||
"latest",
|
||||
"rawhide"
|
||||
],
|
||||
"Created": "2016-06-20T19:33:43.220526898Z",
|
||||
"DockerVersion": "1.10.3",
|
||||
"Labels": {},
|
||||
"Architecture": "amd64",
|
||||
"Os": "linux",
|
||||
"Layers": [
|
||||
"sha256:7c91a140e7a1025c3bc3aace4c80c0d9933ac4ee24b8630a6b0b5d8b9ce6b9d4"
|
||||
]
|
||||
}
|
||||
```
|
||||
## skopeo layers
|
||||
Another method to retrieve the layers for the busybox image from the docker.io registry:
|
||||
```sh
|
||||
$ skopeo layers docker://busybox
|
||||
$ ls layers-500650331/
|
||||
8ddc19f16526912237dd8af81971d5e4dd0587907234be2b83e249518d5b673f.tar
|
||||
manifest.json
|
||||
a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4.tar
|
||||
```
|
||||
## skopeo manifest-digest
|
||||
```sh
|
||||
$ skopeo manifest-digest manifest.json
|
||||
sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6
|
||||
```
|
||||
## skopeo standalone-sign
|
||||
```sh
|
||||
$ skopeo standalone-sign busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 --output busybox.signature
|
||||
$
|
||||
```
|
||||
|
||||
See `skopeo copy` above for the preferred method of signing images.
|
||||
## skopeo standalone-verify
|
||||
```sh
|
||||
$ skopeo standalone-verify busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 busybox.signature
|
||||
Signature verified, digest sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55
|
||||
```
|
||||
|
||||
# AUTHORS
|
||||
|
||||
Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>

@@ -1,115 +0,0 @@
#!/usr/bin/env bash

PROJECT=github.com/projectatomic/skopeo

# Downloads dependencies into vendor/ directory
mkdir -p vendor

original_GOPATH=$GOPATH
export GOPATH="${PWD}/vendor:$GOPATH"

find="/usr/bin/find"

clone() {
	local delete_vendor=true
	if [ "x$1" = x--keep-vendor ]; then
		delete_vendor=false
		shift
	fi

	local vcs="$1"
	local pkg="$2"
	local rev="$3"
	local url="$4"

	: ${url:=https://$pkg}
	local target="vendor/src/$pkg"

	echo -n "$pkg @ $rev: "

	if [ -d "$target" ]; then
		echo -n 'rm old, '
		rm -rf "$target"
	fi

	echo -n 'clone, '
	case "$vcs" in
		git)
			git clone --quiet --no-checkout "$url" "$target"
			( cd "$target" && git checkout --quiet "$rev" && git reset --quiet --hard "$rev" )
			;;
		hg)
			hg clone --quiet --updaterev "$rev" "$url" "$target"
			;;
	esac

	echo -n 'rm VCS, '
	( cd "$target" && rm -rf .{git,hg} )

	if $delete_vendor; then
		echo -n 'rm vendor, '
		( cd "$target" && rm -rf vendor Godeps/_workspace )
	fi

	echo done
}

clean() {
	# If $GOPATH starts with ./vendor, (go list) shows the short-form import paths for packages inside ./vendor.
	# So, reset GOPATH to the external value (without ./vendor), so that the grep -v works.
	local packages=($(GOPATH=$original_GOPATH go list -e ./... | grep -v "^${PROJECT}/vendor"))
	local platforms=( linux/amd64 linux/386 )

	local buildTags=( )

	echo

	echo -n 'collecting import graph, '
	local IFS=$'\n'
	local imports=( $(
		for platform in "${platforms[@]}"; do
			export GOOS="${platform%/*}";
			export GOARCH="${platform##*/}";
			go list -e -tags "$buildTags" -f '{{join .Deps "\n"}}' "${packages[@]}"
			go list -e -tags "$buildTags" -f '{{join .TestImports "\n"}}' "${packages[@]}"
		done | grep -vE "^${PROJECT}" | sort -u
	) )
	# .TestImports does not include indirect dependencies, so do one more iteration.
	imports+=( $(
		go list -e -f '{{join .Deps "\n"}}' "${imports[@]}" | grep -vE "^${PROJECT}" | sort -u
	) )
	imports=( $(go list -e -f '{{if not .Standard}}{{.ImportPath}}{{end}}' "${imports[@]}") )
	unset IFS

	echo -n 'pruning unused packages, '
	findArgs=(
		# This directory contains only .c and .h files which are necessary
		# -path vendor/src/github.com/mattn/go-sqlite3/code
	)
	for import in "${imports[@]}"; do
		[ "${#findArgs[@]}" -eq 0 ] || findArgs+=( -or )
		findArgs+=( -path "vendor/src/$import" )
	done
	local IFS=$'\n'
	local prune=( $($find vendor -depth -type d -not '(' "${findArgs[@]}" ')') )
	unset IFS
	for dir in "${prune[@]}"; do
		$find "$dir" -maxdepth 1 -not -type d -not -name 'LICENSE*' -not -name 'COPYING*' -exec rm -v -f '{}' ';'
		rmdir "$dir" 2>/dev/null || true
	done

	echo -n 'pruning unused files, '
	$find vendor -type f -name '*_test.go' -exec rm -v '{}' ';'

	echo done
}

# Fix up hard-coded imports that refer to Godeps paths so they'll work with our vendoring
fix_rewritten_imports () {
	local pkg="$1"
	local remove="${pkg}/Godeps/_workspace/src/"
	local target="vendor/src/$pkg"

	echo "$pkg: fixing rewritten imports"
	$find "$target" -name \*.go -exec sed -i -e "s|\"${remove}|\"|g" {} \;
}
7
hack/btrfs_tag.sh
Executable file
@@ -0,0 +1,7 @@
#!/bin/bash
cc -E - > /dev/null 2> /dev/null << EOF
#include <btrfs/version.h>
EOF
if test $? -ne 0 ; then
	echo btrfs_noversion
fi
14
hack/libdm_tag.sh
Executable file
@@ -0,0 +1,14 @@
#!/bin/bash
tmpdir="$PWD/tmp.$RANDOM"
mkdir -p "$tmpdir"
trap 'rm -fr "$tmpdir"' EXIT
cc -c -o "$tmpdir"/libdm_tag.o -x c - > /dev/null 2> /dev/null << EOF
#include <libdevmapper.h>
int main() {
	struct dm_task *task;
	return 0;
}
EOF
if test $? -ne 0 ; then
	echo libdm_no_deferred_remove
fi
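Both probe scripts above print a Go build tag (`btrfs_noversion`, `libdm_no_deferred_remove`) when the corresponding C interface is unusable; the test-script hunks that follow show such tags being threaded through `go test`/`go build` via `-tags "$BUILDTAGS"`. As a hedged illustration of how a probe-emitted tag gates compilation (the file, package, and constant names here are invented):

```go
// +build !libdm_no_deferred_remove

package devmapper

// This file is compiled only when hack/libdm_tag.sh stayed silent, i.e.
// when the <libdevmapper.h> probe above compiled successfully, so code
// in this file may rely on libdevmapper being present.
const driverDeferredRemovalSupport = true
```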
@@ -72,10 +72,10 @@ TESTFLAGS+=" -test.timeout=10m"
go_test_dir() {
	dir=$1
	(
		echo '+ go test' $TESTFLAGS "${SKOPEO_PKG}${dir#.}"
		echo '+ go test' $TESTFLAGS ${BUILDTAGS:+-tags "$BUILDTAGS"} "${SKOPEO_PKG}${dir#.}"
		cd "$dir"
		export DEST="$ABS_DEST" # we're in a subshell, so this is safe -- our integration-cli tests need DEST, and "cd" screws it up
		go test $TESTFLAGS
		go test $TESTFLAGS ${BUILDTAGS:+-tags "$BUILDTAGS"}
	)
}


@@ -8,8 +8,8 @@ bundle_test_integration() {

# subshell so that we can export PATH without breaking other things
(
	make binary
	make install-binary
	make binary-local ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
	make install
	export GO15VENDOREXPERIMENT=1
	bundle_test_integration
) 2>&1

37
hack/vendor.sh
Executable file → Normal file
@@ -1,28 +1,15 @@
#!/usr/bin/env bash
#!/bin/bash

# This file is just a wrapper around the vndr (github.com/LK4D4/vndr) tool.
# For updating dependencies you should change the `vendor.conf` file in the root of the
# project. Please refer to https://github.com/LK4D4/vndr/blob/master/README.md for
# vndr usage.

set -e

cd "$(dirname "$BASH_SOURCE")/.."
rm -rf vendor/
source 'hack/.vendor-helpers.sh'
if ! hash vndr; then
	echo "Please install vndr with \"go get github.com/LK4D4/vndr\" and put it in your \$GOPATH"
	exit 1
fi

clone git github.com/codegangsta/cli v1.2.0
clone git github.com/Sirupsen/logrus v0.10.0
clone git github.com/go-check/check v1
clone git github.com/stretchr/testify v1.1.3
clone git github.com/davecgh/go-spew master
clone git github.com/pmezard/go-difflib master
clone git github.com/docker/docker 0f5c9d301b9b1cca66b3ea0f9dec3b5317d3686d
clone git github.com/docker/distribution master
clone git github.com/docker/libtrust master
clone git github.com/opencontainers/runc master
clone git github.com/mtrmac/gpgme master
# openshift/origin's k8s dependencies as of OpenShift v1.1.5
clone git github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
clone git k8s.io/kubernetes 4a3f9c5b19c7ff804cbc1bf37a15c044ca5d2353 https://github.com/openshift/kubernetes
clone git github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee
clone git gopkg.in/yaml.v2 d466437aa4adc35830964cffc5b5f262c63ddcb4
clone git github.com/imdario/mergo 6633656539c1639d9d78127b7d47c622b5d7b6dc

clean

mv vendor/src/* vendor/
vndr "$@"

@@ -3,20 +3,15 @@ package main
import (
	"fmt"
	"os/exec"
	"strings"
	"testing"

	"github.com/go-check/check"
	"github.com/projectatomic/skopeo/version"
)

const (
	privateRegistryURL0 = "127.0.0.1:5000"
	privateRegistryURL1 = "127.0.0.1:5001"
	privateRegistryURL2 = "127.0.0.1:5002"
	privateRegistryURL3 = "127.0.0.1:5003"
	privateRegistryURL4 = "127.0.0.1:5004"

	skopeoBinary = "skopeo"
)

func Test(t *testing.T) {
@@ -28,15 +23,13 @@ func init() {
}

type SkopeoSuite struct {
	regV1         *testRegistryV1
	regV2         *testRegistryV2
	regV2Shema1   *testRegistryV2
	regV1WithAuth *testRegistryV1 // does v1 support auth?
	regV2WithAuth *testRegistryV2
}

func (s *SkopeoSuite) SetUpSuite(c *check.C) {

	_, err := exec.LookPath(skopeoBinary)
	c.Assert(err, check.IsNil)
}

func (s *SkopeoSuite) TearDownSuite(c *check.C) {
@@ -44,24 +37,14 @@ func (s *SkopeoSuite) TearDownSuite(c *check.C) {
}

func (s *SkopeoSuite) SetUpTest(c *check.C) {
	_, err := exec.LookPath(skopeoBinary)
	c.Assert(err, check.IsNil)

	s.regV1 = setupRegistryV1At(c, privateRegistryURL0, false) // TODO:(runcom)
	s.regV2 = setupRegistryV2At(c, privateRegistryURL1, false, false)
	s.regV2Shema1 = setupRegistryV2At(c, privateRegistryURL2, false, true)
	s.regV1WithAuth = setupRegistryV1At(c, privateRegistryURL3, true) // not used
	s.regV2WithAuth = setupRegistryV2At(c, privateRegistryURL4, true, false)
	s.regV2 = setupRegistryV2At(c, privateRegistryURL0, false, false)
	s.regV2WithAuth = setupRegistryV2At(c, privateRegistryURL1, true, false)
}

func (s *SkopeoSuite) TearDownTest(c *check.C) {
	// not checking V1 registries now...
	if s.regV2 != nil {
		s.regV2.Close()
	}
	if s.regV2Shema1 != nil {
		s.regV2Shema1.Close()
	}
	if s.regV2WithAuth != nil {
		//cmd := exec.Command("docker", "logout", s.regV2WithAuth)
		//c.Assert(cmd.Run(), check.IsNil)
@@ -73,51 +56,34 @@ func (s *SkopeoSuite) TearDownTest(c *check.C) {
//func skopeoCmd()

func (s *SkopeoSuite) TestVersion(c *check.C) {
	out, err := exec.Command(skopeoBinary, "--version").CombinedOutput()
	c.Assert(err, check.IsNil, check.Commentf(string(out)))
	wanted := skopeoBinary + " version "
	if !strings.Contains(string(out), wanted) {
		c.Fatalf("wanted %s, got %s", wanted, string(out))
	}
	wanted := fmt.Sprintf(".*%s version %s.*", skopeoBinary, version.Version)
	assertSkopeoSucceeds(c, wanted, "--version")
}

var (
	errFetchManifest = "error fetching manifest: status code: %s"
)

func (s *SkopeoSuite) TestCanAuthToPrivateRegistryV2WithoutDockerCfg(c *check.C) {
	// TODO(runcom)
	c.Skip("we need to restore --username --password flags!")
	out, err := exec.Command(skopeoBinary, "--docker-cfg=''", "--username="+s.regV2WithAuth.username, "--password="+s.regV2WithAuth.password, "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url)).CombinedOutput()
	c.Assert(err, check.NotNil, check.Commentf(string(out)))
	wanted := fmt.Sprintf(errFetchManifest, "401")
	if !strings.Contains(string(out), wanted) {
		c.Fatalf("wanted %s, got %s", wanted, string(out))
	}
	wanted := ".*manifest unknown: manifest unknown.*"
	assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", "--creds="+s.regV2WithAuth.username+":"+s.regV2WithAuth.password, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
}

func (s *SkopeoSuite) TestNeedAuthToPrivateRegistryV2WithoutDockerCfg(c *check.C) {
	// TODO(runcom): mock the empty docker-cfg by removing it in the test itself (?)
	c.Skip("mock empty docker config")
	out, err := exec.Command(skopeoBinary, "--docker-cfg=''", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url)).CombinedOutput()
	c.Assert(err, check.NotNil, check.Commentf(string(out)))
	wanted := fmt.Sprintf(errFetchManifest, "401")
	if !strings.Contains(string(out), wanted) {
		c.Fatalf("wanted %s, got %s", wanted, string(out))
	}
	wanted := ".*unauthorized: authentication required.*"
	assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
}

func (s *SkopeoSuite) TestCertDirInsteadOfCertPath(c *check.C) {
	wanted := ".*flag provided but not defined: -cert-path.*"
	assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "--cert-path=/")
	wanted = ".*unauthorized: authentication required.*"
	assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "--cert-dir=/etc/docker/certs.d/")
}

// TODO(runcom): as soon as we can push to registries ensure you can inspect here
// not just get image not found :)
func (s *SkopeoSuite) TestNoNeedAuthToPrivateRegistryV2ImageNotFound(c *check.C) {
	out, err := exec.Command(skopeoBinary, "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2.url)).CombinedOutput()
	out, err := exec.Command(skopeoBinary, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2.url)).CombinedOutput()
	c.Assert(err, check.NotNil, check.Commentf(string(out)))
	wanted := fmt.Sprintf(errFetchManifest, "404")
	if !strings.Contains(string(out), wanted) {
		c.Fatalf("wanted %s, got %s", wanted, string(out))
	}
	wanted = fmt.Sprintf(errFetchManifest, "401")
	if strings.Contains(string(out), wanted) {
		c.Fatalf("not wanted %s, got %s", wanted, string(out))
	}
	wanted := ".*manifest unknown.*"
	c.Assert(string(out), check.Matches, "(?s)"+wanted) // (?s) : '.' will also match newlines
	wanted = ".*unauthorized: authentication required.*"
	c.Assert(string(out), check.Not(check.Matches), "(?s)"+wanted) // (?s) : '.' will also match newlines
}
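A small runnable aside on the `(?s)` flag used in the rewritten assertions above: gocheck's Matches checker anchors its pattern against the whole output, so without `(?s)` the dot cannot cross the newlines of multi-line CLI output. The sample string below is shaped like the skopeo failure output above; its contents are illustrative.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	out := "time=\"...\" level=fatal\nmanifest unknown: manifest unknown\nexit status 1"
	// Anchored (as gocheck anchors it), without (?s): '.' stops at newlines,
	// so the whole-output match fails.
	fmt.Println(regexp.MustCompile(`^.*manifest unknown.*$`).MatchString(out)) // false
	// With (?s), '.' also matches newlines, as the test comments note.
	fmt.Println(regexp.MustCompile(`(?s)^.*manifest unknown.*$`).MatchString(out)) // true
}
```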
623
integration/copy_test.go
Normal file
@@ -0,0 +1,623 @@
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"strings"

	"github.com/containers/image/manifest"
	"github.com/containers/image/signature"
	"github.com/go-check/check"
	"github.com/opencontainers/go-digest"
)

func init() {
	check.Suite(&CopySuite{})
}

const (
	v2DockerRegistryURL   = "localhost:5555" // Update also policy.json
	v2s1DockerRegistryURL = "localhost:5556"
)

type CopySuite struct {
	cluster    *openshiftCluster
	registry   *testRegistryV2
	s1Registry *testRegistryV2
	gpgHome    string
}

func (s *CopySuite) SetUpSuite(c *check.C) {
	if os.Getenv("SKOPEO_CONTAINER_TESTS") != "1" {
		c.Skip("Not running in a container, refusing to affect user state")
	}

	s.cluster = startOpenshiftCluster(c) // FIXME: Set up TLS for the docker registry port instead of using "--tls-verify=false" all over the place.

	for _, stream := range []string{"unsigned", "personal", "official", "naming", "cosigned", "compression", "schema1", "schema2"} {
		isJSON := fmt.Sprintf(`{
			"kind": "ImageStream",
			"apiVersion": "v1",
			"metadata": {
				"name": "%s"
			},
			"spec": {}
		}`, stream)
		runCommandWithInput(c, isJSON, "oc", "create", "-f", "-")
	}

	// FIXME: Set up TLS for the docker registry port instead of using "--tls-verify=false" all over the place.
	s.registry = setupRegistryV2At(c, v2DockerRegistryURL, false, false)
	s.s1Registry = setupRegistryV2At(c, v2s1DockerRegistryURL, false, true)

	gpgHome, err := ioutil.TempDir("", "skopeo-gpg")
	c.Assert(err, check.IsNil)
	s.gpgHome = gpgHome
	os.Setenv("GNUPGHOME", s.gpgHome)

	for _, key := range []string{"personal", "official"} {
		batchInput := fmt.Sprintf("Key-Type: RSA\nName-Real: Test key - %s\nName-email: %s@example.com\n%%commit\n",
			key, key)
		runCommandWithInput(c, batchInput, gpgBinary, "--batch", "--gen-key")

		out := combinedOutputOfCommand(c, gpgBinary, "--armor", "--export", fmt.Sprintf("%s@example.com", key))
		err := ioutil.WriteFile(filepath.Join(s.gpgHome, fmt.Sprintf("%s-pubkey.gpg", key)),
			[]byte(out), 0600)
		c.Assert(err, check.IsNil)
	}
}

func (s *CopySuite) TearDownSuite(c *check.C) {
	if s.gpgHome != "" {
		os.RemoveAll(s.gpgHome)
	}
	if s.registry != nil {
		s.registry.Close()
	}
	if s.s1Registry != nil {
		s.s1Registry.Close()
	}
	if s.cluster != nil {
		s.cluster.tearDown(c)
	}
}

func (s *CopySuite) TestCopyFailsWithManifestList(c *check.C) {
	assertSkopeoFails(c, ".*can not copy docker://estesp/busybox:latest: manifest contains multiple images.*", "copy", "docker://estesp/busybox:latest", "dir:somedir")
}

func (s *CopySuite) TestCopyFailsWhenImageOSDoesntMatchRuntimeOS(c *check.C) {
	c.Skip("can't run this on Travis")
	assertSkopeoFails(c, `.*image operating system "windows" cannot be used on "linux".*`, "copy", "docker://microsoft/windowsservercore", "containers-storage:test")
}

func (s *CopySuite) TestCopySimpleAtomicRegistry(c *check.C) {
	dir1, err := ioutil.TempDir("", "copy-1")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir1)
	dir2, err := ioutil.TempDir("", "copy-2")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir2)

	// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
	// "pull": docker: → dir:
	assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:amd64", "dir:"+dir1)
	// "push": dir: → atomic:
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, "atomic:localhost:5000/myns/unsigned:unsigned")
	// The result of pushing and pulling is an equivalent image, except for schema1 embedded names.
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/unsigned:unsigned", "dir:"+dir2)
	assertSchema1DirImagesAreEqualExceptNames(c, dir1, "estesp/busybox:amd64", dir2, "myns/unsigned:unsigned")
}

// The most basic (skopeo copy) use:
func (s *CopySuite) TestCopySimple(c *check.C) {
	const ourRegistry = "docker://" + v2DockerRegistryURL + "/"

	dir1, err := ioutil.TempDir("", "copy-1")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir1)
	dir2, err := ioutil.TempDir("", "copy-2")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir2)

	// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
	// "pull": docker: → dir:
	assertSkopeoSucceeds(c, "", "copy", "docker://busybox", "dir:"+dir1)
	// "push": dir: → docker(v2s2):
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, ourRegistry+"busybox:unsigned")
	// The result of pushing and pulling is an unmodified image.
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", ourRegistry+"busybox:unsigned", "dir:"+dir2)
	out := combinedOutputOfCommand(c, "diff", "-urN", dir1, dir2)
	c.Assert(out, check.Equals, "")

	// docker v2s2 -> OCI image layout
	// ociDest will be created by oci: if it doesn't exist
	// so don't create it here to exercise auto-creation
	ociDest := "busybox-latest"
	defer os.RemoveAll(ociDest)
	assertSkopeoSucceeds(c, "", "copy", "docker://busybox:latest", "oci:"+ociDest)
	_, err = os.Stat(ociDest)
	c.Assert(err, check.IsNil)
}

// Check whether dir: images in dir1 and dir2 are equal, ignoring schema1 signatures.
func assertDirImagesAreEqual(c *check.C, dir1, dir2 string) {
	// The manifests may have different JWS signatures; so, compare the manifests by digests, which
	// strips the signatures.
	digests := []digest.Digest{}
	for _, dir := range []string{dir1, dir2} {
		manifestPath := filepath.Join(dir, "manifest.json")
		m, err := ioutil.ReadFile(manifestPath)
		c.Assert(err, check.IsNil)
		digest, err := manifest.Digest(m)
		c.Assert(err, check.IsNil)
		digests = append(digests, digest)
	}
	c.Assert(digests[0], check.Equals, digests[1])
	// Then compare the rest file by file.
	out := combinedOutputOfCommand(c, "diff", "-urN", "-x", "manifest.json", dir1, dir2)
	c.Assert(out, check.Equals, "")
}

// Check whether schema1 dir: images in dir1 and dir2 are equal, ignoring schema1 signatures and the embedded path/tag values, which should have the expected values.
func assertSchema1DirImagesAreEqualExceptNames(c *check.C, dir1, ref1, dir2, ref2 string) {
	// The manifests may have different JWS signatures and names; so, unmarshal and delete these elements.
	manifests := []map[string]interface{}{}
	for dir, ref := range map[string]string{dir1: ref1, dir2: ref2} {
		manifestPath := filepath.Join(dir, "manifest.json")
		m, err := ioutil.ReadFile(manifestPath)
		c.Assert(err, check.IsNil)
		data := map[string]interface{}{}
		err = json.Unmarshal(m, &data)
		c.Assert(err, check.IsNil)
		c.Assert(data["schemaVersion"], check.Equals, float64(1))
		colon := strings.LastIndex(ref, ":")
		c.Assert(colon, check.Not(check.Equals), -1)
		c.Assert(data["name"], check.Equals, ref[:colon])
		c.Assert(data["tag"], check.Equals, ref[colon+1:])
		for _, key := range []string{"signatures", "name", "tag"} {
			delete(data, key)
		}
		manifests = append(manifests, data)
	}
	c.Assert(manifests[0], check.DeepEquals, manifests[1])
	// Then compare the rest file by file.
	out := combinedOutputOfCommand(c, "diff", "-urN", "-x", "manifest.json", dir1, dir2)
	c.Assert(out, check.Equals, "")
}

// Streaming (skopeo copy)
func (s *CopySuite) TestCopyStreaming(c *check.C) {
	dir1, err := ioutil.TempDir("", "streaming-1")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir1)
	dir2, err := ioutil.TempDir("", "streaming-2")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir2)

	// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
	// streaming: docker: → atomic:
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "docker://estesp/busybox:amd64", "atomic:localhost:5000/myns/unsigned:streaming")
	// Compare (copies of) the original and the copy:
	assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:amd64", "dir:"+dir1)
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/unsigned:streaming", "dir:"+dir2)
	assertSchema1DirImagesAreEqualExceptNames(c, dir1, "estesp/busybox:amd64", dir2, "myns/unsigned:streaming")
	// FIXME: Also check pushing to docker://
}

// OCI round-trip testing. It's very important to make sure that OCI <-> Docker
// conversion works (while skopeo handles many things, one of the most obvious
// benefits of a tool like skopeo is that you can use OCI tooling to create an
// image and then as the final step convert the image to a non-standard format
// like Docker). But this only works if we _test_ it.
func (s *CopySuite) TestCopyOCIRoundTrip(c *check.C) {
	const ourRegistry = "docker://" + v2DockerRegistryURL + "/"

	oci1, err := ioutil.TempDir("", "oci-1")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(oci1)
	oci2, err := ioutil.TempDir("", "oci-2")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(oci2)

	// Docker -> OCI
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "docker://busybox", "oci:"+oci1+":latest")
	// OCI -> Docker
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "oci:"+oci1+":latest", ourRegistry+"original/busybox:oci_copy")
	// Docker -> OCI
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", ourRegistry+"original/busybox:oci_copy", "oci:"+oci2+":latest")
	// OCI -> Docker
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "oci:"+oci2+":latest", ourRegistry+"original/busybox:oci_copy2")

	// TODO: Add some more tags to output to and check those work properly.

	// First, make sure the OCI blobs are the same. This should _always_ be true.
	out := combinedOutputOfCommand(c, "diff", "-urN", oci1+"/blobs", oci2+"/blobs")
	c.Assert(out, check.Equals, "")

	// For some silly reason we pass a logger to the OCI library here...
	//logger := log.New(os.Stderr, "", 0)

	// TODO: Verify using the upstream OCI image validator.
	//err = image.ValidateLayout(oci1, nil, logger)
	//c.Assert(err, check.IsNil)
	//err = image.ValidateLayout(oci2, nil, logger)
	//c.Assert(err, check.IsNil)

	// Now verify that everything is identical. Currently this is true, but
	// because we recompute the manifests on-the-fly this doesn't necessarily
	// always have to be true (but if this breaks in the future __PLEASE__ make
	// sure that the breakage actually makes sense before removing this check).
	out = combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2)
	c.Assert(out, check.Equals, "")
}

// --sign-by and --policy copy, primarily using atomic:
func (s *CopySuite) TestCopySignatures(c *check.C) {
	mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
	c.Assert(err, check.IsNil)
	defer mech.Close()
	if err := mech.SupportsSigning(); err != nil { // FIXME? Test that verification and policy enforcement works, using signatures from fixtures
		c.Skip(fmt.Sprintf("Signing not supported: %v", err))
	}

	dir, err := ioutil.TempDir("", "signatures-dest")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir)
	dirDest := "dir:" + dir

	policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome})
	defer os.Remove(policy)

	// type: reject
	assertSkopeoFails(c, ".*Source image rejected: Running image docker://busybox:latest is rejected by policy.*",
		"--policy", policy, "copy", "docker://busybox:latest", dirDest)

	// type: insecureAcceptAnything
	assertSkopeoSucceeds(c, "", "--policy", policy, "copy", "docker://openshift/hello-openshift", dirDest)

	// type: signedBy
	// Sign the images
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "personal@example.com", "docker://busybox:1.26", "atomic:localhost:5006/myns/personal:personal")
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "official@example.com", "docker://busybox:1.26.1", "atomic:localhost:5006/myns/official:official")
	// Verify that we can pull them
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/personal:personal", dirDest)
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/official:official", dirDest)
	// Verify that mis-signed images are rejected
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/personal:personal", "atomic:localhost:5006/myns/official:attack")
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/personal:attack")
	assertSkopeoFails(c, ".*Source image rejected: Invalid GPG signature.*",
		"--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/personal:attack", dirDest)
	assertSkopeoFails(c, ".*Source image rejected: Invalid GPG signature.*",
		"--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/official:attack", dirDest)

	// Verify that signed identity is verified.
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/naming:test1")
	assertSkopeoFails(c, ".*Source image rejected: Signature for identity localhost:5006/myns/official:official is not accepted.*",
		"--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/naming:test1", dirDest)
	// signedIdentity works
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/naming:naming")
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/naming:naming", dirDest)

	// Verify that cosigning requirements are enforced
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/cosigned:cosigned")
	assertSkopeoFails(c, ".*Source image rejected: Invalid GPG signature.*",
		"--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/cosigned:cosigned", dirDest)

	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "personal@example.com", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/cosigned:cosigned")
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/cosigned:cosigned", dirDest)
}
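`fileFromFixture`, used throughout these tests but defined elsewhere in the suite, evidently instantiates a fixture file with placeholder substitutions, e.g. mapping "@keydir@" to the suite's GPG home. A hedged sketch of such a helper, assuming only what the call sites show (it needs `bytes` in addition to the imports above, and the returned path is the caller's to os.Remove, matching the deferred cleanups here):

```go
// fileFromFixtureSketch is a hypothetical stand-in for the real helper:
// read a fixture, apply placeholder → value substitutions, write the
// result to a temporary file, and return that file's path.
func fileFromFixtureSketch(c *check.C, inputPath string, edits map[string]string) string {
	contents, err := ioutil.ReadFile(inputPath)
	c.Assert(err, check.IsNil)
	// Apply each substitution, e.g. "@keydir@" → s.gpgHome.
	for placeholder, value := range edits {
		contents = bytes.Replace(contents, []byte(placeholder), []byte(value), -1)
	}
	file, err := ioutil.TempFile("", filepath.Base(inputPath))
	c.Assert(err, check.IsNil)
	defer file.Close()
	_, err = file.Write(contents)
	c.Assert(err, check.IsNil)
	return file.Name()
}
```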
|
||||
|
||||
// --policy copy for dir: sources
|
||||
func (s *CopySuite) TestCopyDirSignatures(c *check.C) {
|
||||
mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
|
||||
c.Assert(err, check.IsNil)
|
||||
defer mech.Close()
|
||||
if err := mech.SupportsSigning(); err != nil { // FIXME? Test that verification and policy enforcement works, using signatures from fixtures
|
||||
c.Skip(fmt.Sprintf("Signing not supported: %v", err))
|
||||
}
|
||||
|
||||
topDir, err := ioutil.TempDir("", "dir-signatures-top")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(topDir)
|
||||
topDirDest := "dir:" + topDir
|
||||
|
||||
for _, suffix := range []string{"/dir1", "/dir2", "/restricted/personal", "/restricted/official", "/restricted/badidentity", "/dest"} {
|
||||
err := os.MkdirAll(topDir+suffix, 0755)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
// Note the "/@dirpath@": The value starts with a slash so that it is not rejected in other tests which do not replace it,
|
||||
// but we must ensure that the result is a canonical path, not something starting with a "//".
|
||||
policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome, "/@dirpath@": topDir + "/restricted"})
|
||||
defer os.Remove(policy)
|
||||
|
||||
// Get some images.
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:armfh", topDirDest+"/dir1")
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:s390x", topDirDest+"/dir2")
|
||||
|
||||
// Sign the images. By coping fom a topDirDest/dirN, also test that non-/restricted paths
|
||||
// use the dir:"" default of insecureAcceptAnything.
|
||||
// (For signing, we must push to atomic: to get a Docker identity to use in the signature.)
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "--sign-by", "personal@example.com", topDirDest+"/dir1", "atomic:localhost:5000/myns/personal:dirstaging")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "--sign-by", "official@example.com", topDirDest+"/dir2", "atomic:localhost:5000/myns/official:dirstaging")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/personal:dirstaging", topDirDest+"/restricted/personal")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/official:dirstaging", topDirDest+"/restricted/official")
|
||||
|
||||
// type: signedBy, with a signedIdentity override (necessary because dir: identities can't be signed)
|
||||
// Verify that correct images are accepted
|
||||
assertSkopeoSucceeds(c, "", "--policy", policy, "copy", topDirDest+"/restricted/official", topDirDest+"/dest")
|
||||
// ... and that mis-signed images are rejected.
|
||||
assertSkopeoFails(c, ".*Source image rejected: Invalid GPG signature.*",
|
||||
"--policy", policy, "copy", topDirDest+"/restricted/personal", topDirDest+"/dest")
|
||||
|
||||
// Verify that the signed identity is verified.
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "--sign-by", "official@example.com", topDirDest+"/dir1", "atomic:localhost:5000/myns/personal:dirstaging2")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/personal:dirstaging2", topDirDest+"/restricted/badidentity")
|
||||
assertSkopeoFails(c, ".*Source image rejected: .*Signature for identity localhost:5000/myns/personal:dirstaging2 is not accepted.*",
|
||||
"--policy", policy, "copy", topDirDest+"/restricted/badidentity", topDirDest+"/dest")
|
||||
}
|
||||
|
||||
// Compression during copy
|
||||
func (s *CopySuite) TestCopyCompression(c *check.C) {
|
||||
const uncompresssedLayerFile = "160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710.tar"
|
||||
|
||||
topDir, err := ioutil.TempDir("", "compression-top")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(topDir)
|
||||
|
||||
for i, t := range []struct{ fixture, remote string }{
|
||||
{"uncompressed-image-s1", "docker://" + v2DockerRegistryURL + "/compression/compression:s1"},
|
||||
{"uncompressed-image-s2", "docker://" + v2DockerRegistryURL + "/compression/compression:s2"},
|
||||
{"uncompressed-image-s1", "atomic:localhost:5000/myns/compression:s1"},
|
||||
{"uncompressed-image-s2", "atomic:localhost:5000/myns/compression:s2"},
|
||||
} {
|
||||
dir := filepath.Join(topDir, fmt.Sprintf("case%d", i))
|
||||
err := os.MkdirAll(dir, 0755)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "dir:fixtures/"+t.fixture, t.remote)
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", t.remote, "dir:"+dir)
|
||||
|
||||
// The original directory contained an uncompressed file, the copy after pushing and pulling doesn't (we use a different name for the compressed file).
|
||||
_, err = os.Lstat(filepath.Join("fixtures", t.fixture, uncompresssedLayerFile))
|
||||
c.Assert(err, check.IsNil)
|
||||
_, err = os.Lstat(filepath.Join(dir, uncompresssedLayerFile))
|
||||
c.Assert(err, check.NotNil)
|
||||
c.Assert(os.IsNotExist(err), check.Equals, true)
|
||||
|
||||
// All pulled layers are smaller than the uncompressed size of uncompresssedLayerFile. (Note that this includes the manifest in s2, but that works out OK).
|
||||
dirf, err := os.Open(dir)
|
||||
c.Assert(err, check.IsNil)
|
||||
fis, err := dirf.Readdir(-1)
|
||||
c.Assert(err, check.IsNil)
|
||||
for _, fi := range fis {
|
||||
if strings.HasSuffix(fi.Name(), ".tar") {
|
||||
c.Assert(fi.Size() < 2048, check.Equals, true)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func findRegularFiles(c *check.C, root string) []string {
|
||||
result := []string{}
|
||||
err := filepath.Walk(root, filepath.WalkFunc(func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.Mode().IsRegular() {
|
||||
result = append(result, path)
|
||||
}
|
||||
return nil
|
||||
}))
|
||||
c.Assert(err, check.IsNil)
|
||||
return result
|
||||
}
|
||||
|
||||
// --sign-by and policy use for docker: with sigstore
|
||||
func (s *CopySuite) TestCopyDockerSigstore(c *check.C) {
|
||||
mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
|
||||
c.Assert(err, check.IsNil)
|
||||
defer mech.Close()
|
||||
if err := mech.SupportsSigning(); err != nil { // FIXME? Test that verification and policy enforcement works, using signatures from fixtures
|
||||
c.Skip(fmt.Sprintf("Signing not supported: %v", err))
|
||||
}
|
||||
|
||||
const ourRegistry = "docker://" + v2DockerRegistryURL + "/"
|
||||
|
||||
tmpDir, err := ioutil.TempDir("", "signatures-sigstore")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
copyDest := filepath.Join(tmpDir, "dest")
|
||||
err = os.Mkdir(copyDest, 0755)
|
||||
c.Assert(err, check.IsNil)
|
||||
dirDest := "dir:" + copyDest
|
||||
plainSigstore := filepath.Join(tmpDir, "sigstore")
|
||||
splitSigstoreStaging := filepath.Join(tmpDir, "sigstore-staging")
|
||||
|
||||
splitSigstoreReadServerHandler := http.NotFoundHandler()
|
||||
splitSigstoreReadServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
splitSigstoreReadServerHandler.ServeHTTP(w, r)
|
||||
}))
|
||||
defer splitSigstoreReadServer.Close()
|
||||
|
||||
policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome})
|
||||
defer os.Remove(policy)
|
||||
registriesDir := filepath.Join(tmpDir, "registries.d")
|
||||
err = os.Mkdir(registriesDir, 0755)
|
||||
c.Assert(err, check.IsNil)
|
||||
registriesFile := fileFromFixture(c, "fixtures/registries.yaml",
|
||||
map[string]string{"@sigstore@": plainSigstore, "@split-staging@": splitSigstoreStaging, "@split-read@": splitSigstoreReadServer.URL})
|
||||
err = os.Symlink(registriesFile, filepath.Join(registriesDir, "registries.yaml"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// Get an image to work with. Also verifies that we can use Docker repositories with no sigstore configured.
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "docker://busybox", ourRegistry+"original/busybox")
|
||||
// Pulling an unsigned image fails.
|
||||
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
|
||||
"--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"original/busybox", dirDest)
|
||||
|
||||
// Signing with sigstore defined succeeds,
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "--sign-by", "personal@example.com", ourRegistry+"original/busybox", ourRegistry+"signed/busybox")
|
||||
// a signature file has been created,
|
||||
foundFiles := findRegularFiles(c, plainSigstore)
|
||||
c.Assert(foundFiles, check.HasLen, 1)
|
||||
// and pulling a signed image succeeds.
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"signed/busybox", dirDest)
|
||||
|
||||
// Deleting the image succeeds,
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "delete", ourRegistry+"signed/busybox")
|
||||
// and the signature file has been deleted (but we leave the directories around).
|
||||
foundFiles = findRegularFiles(c, plainSigstore)
|
||||
c.Assert(foundFiles, check.HasLen, 0)
|
||||
|
||||
// Signing with a read/write sigstore split succeeds,
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "--sign-by", "personal@example.com", ourRegistry+"original/busybox", ourRegistry+"public/busybox")
|
||||
// and a signature file has been created.
|
||||
foundFiles = findRegularFiles(c, splitSigstoreStaging)
|
||||
c.Assert(foundFiles, check.HasLen, 1)
|
||||
// Pulling the image fails because the read sigstore URL has not been populated:
|
||||
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
|
||||
"--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"public/busybox", dirDest)
|
||||
// Pulling the image succeeds after the read sigstore URL is available:
|
||||
splitSigstoreReadServerHandler = http.FileServer(http.Dir(splitSigstoreStaging))
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"public/busybox", dirDest)
|
||||
}
|
||||
|
||||
// atomic: and docker: X-Registry-Supports-Signatures works and interoperates
|
||||
func (s *CopySuite) TestCopyAtomicExtension(c *check.C) {
|
||||
mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
|
||||
c.Assert(err, check.IsNil)
|
||||
defer mech.Close()
|
||||
if err := mech.SupportsSigning(); err != nil { // FIXME? Test that the reading/writing works using signatures from fixtures
|
||||
c.Skip(fmt.Sprintf("Signing not supported: %v", err))
|
||||
}
|
||||
|
||||
topDir, err := ioutil.TempDir("", "atomic-extension")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(topDir)
|
||||
for _, subdir := range []string{"dirAA", "dirAD", "dirDA", "dirDD", "registries.d"} {
|
||||
err := os.MkdirAll(filepath.Join(topDir, subdir), 0755)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
registriesDir := filepath.Join(topDir, "registries.d")
|
||||
dirDest := "dir:" + topDir
|
||||
policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome})
|
||||
defer os.Remove(policy)
|
||||
|
||||
// Get an image to work with to an atomic: destination. Also verifies that we can use Docker repositories without X-Registry-Supports-Signatures
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "docker://busybox", "atomic:localhost:5000/myns/extension:unsigned")
|
||||
// Pulling an unsigned image using atomic: fails.
|
||||
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
|
||||
"--tls-verify=false", "--policy", policy,
|
||||
"copy", "atomic:localhost:5000/myns/extension:unsigned", dirDest+"/dirAA")
|
||||
// The same when pulling using docker:
|
||||
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
|
||||
"--tls-verify=false", "--policy", policy, "--registries.d", registriesDir,
|
||||
"copy", "docker://localhost:5000/myns/extension:unsigned", dirDest+"/dirAD")
|
||||
|
||||
// Sign the image using atomic:
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false",
|
||||
"copy", "--sign-by", "personal@example.com", "atomic:localhost:5000/myns/extension:unsigned", "atomic:localhost:5000/myns/extension:atomic")
|
||||
// Pulling the image using atomic: now succeeds.
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy,
|
||||
"copy", "atomic:localhost:5000/myns/extension:atomic", dirDest+"/dirAA")
|
||||
// The same when pulling using docker:
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir,
|
||||
"copy", "docker://localhost:5000/myns/extension:atomic", dirDest+"/dirAD")
|
||||
// Both access methods result in the same data.
|
||||
assertDirImagesAreEqual(c, filepath.Join(topDir, "dirAA"), filepath.Join(topDir, "dirAD"))
|
||||
|
||||
// Get another image (different so that they don't share signatures, and sign it using docker://)
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir,
|
||||
"copy", "--sign-by", "personal@example.com", "docker://estesp/busybox:ppc64le", "atomic:localhost:5000/myns/extension:extension")
|
||||
c.Logf("%s", combinedOutputOfCommand(c, "oc", "get", "istag", "extension:extension", "-o", "json"))
|
||||
// Pulling the image using atomic: succeeds.
|
||||
assertSkopeoSucceeds(c, "", "--debug", "--tls-verify=false", "--policy", policy,
|
||||
"copy", "atomic:localhost:5000/myns/extension:extension", dirDest+"/dirDA")
|
||||
// The same when pulling using docker:
|
||||
assertSkopeoSucceeds(c, "", "--debug", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir,
|
||||
"copy", "docker://localhost:5000/myns/extension:extension", dirDest+"/dirDD")
|
||||
// Both access methods result in the same data.
|
||||
assertDirImagesAreEqual(c, filepath.Join(topDir, "dirDA"), filepath.Join(topDir, "dirDD"))
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestCopySrcWithAuth(c *check.C) {
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", "docker://busybox", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
dir1, err := ioutil.TempDir("", "copy-1")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir1)
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--src-creds=testuser:testpassword", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "dir:"+dir1)
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestCopyDestWithAuth(c *check.C) {
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", "docker://busybox", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestCopySrcAndDestWithAuth(c *check.C) {
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", "docker://busybox", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--src-creds=testuser:testpassword", "--dest-creds=testuser:testpassword", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), fmt.Sprintf("docker://%s/test:auth", s.regV2WithAuth.url))
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopyNoPanicOnHTTPResponseWOTLSVerifyFalse(c *check.C) {
|
||||
const ourRegistry = "docker://" + v2DockerRegistryURL + "/"
|
||||
|
||||
// dir:test isn't created beforehand just because we already know this could
|
||||
// just fail when evaluating the src
|
||||
assertSkopeoFails(c, ".*server gave HTTP response to HTTPS client.*",
|
||||
"copy", ourRegistry+"foobar", "dir:test")
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopySchemaConversion(c *check.C) {
|
||||
// Test conversion / schema autodetection both for the OpenShift embedded registry…
|
||||
s.testCopySchemaConversionRegistries(c, "docker://localhost:5005/myns/schema1", "docker://localhost:5006/myns/schema2")
|
||||
// … and for various docker/distribution registry versions.
|
||||
s.testCopySchemaConversionRegistries(c, "docker://"+v2s1DockerRegistryURL+"/schema1", "docker://"+v2DockerRegistryURL+"/schema2")
|
||||
}
|
||||
|
||||
func (s *CopySuite) testCopySchemaConversionRegistries(c *check.C, schema1Registry, schema2Registry string) {
|
||||
topDir, err := ioutil.TempDir("", "schema-conversion")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(topDir)
|
||||
for _, subdir := range []string{"input1", "input2", "dest2"} {
|
||||
err := os.MkdirAll(filepath.Join(topDir, subdir), 0755)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
input1Dir := filepath.Join(topDir, "input1")
|
||||
input2Dir := filepath.Join(topDir, "input2")
|
||||
destDir := filepath.Join(topDir, "dest2")
|
||||
|
||||
// Ensure we are working with a schema2 image.
|
||||
// dir: accepts any manifest format, i.e. this makes …/input2 a schema2 source which cannot be asked to produce schema1 like ordinary docker: registries can.
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://busybox", "dir:"+input2Dir)
|
||||
verifyManifestMIMEType(c, input2Dir, manifest.DockerV2Schema2MediaType)
|
||||
// 2→2 (the "f2t2" in tag means "from 2 to 2")
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "dir:"+input2Dir, schema2Registry+":f2t2")
|
||||
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", schema2Registry+":f2t2", "dir:"+destDir)
|
||||
verifyManifestMIMEType(c, destDir, manifest.DockerV2Schema2MediaType)
|
||||
// 2→1; we will use the result as a schema1 image for further tests.
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "dir:"+input2Dir, schema1Registry+":f2t1")
|
||||
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", schema1Registry+":f2t1", "dir:"+input1Dir)
|
||||
verifyManifestMIMEType(c, input1Dir, manifest.DockerV2Schema1SignedMediaType)
|
||||
// 1→1
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "dir:"+input1Dir, schema1Registry+":f1t1")
|
||||
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", schema1Registry+":f1t1", "dir:"+destDir)
|
||||
verifyManifestMIMEType(c, destDir, manifest.DockerV2Schema1SignedMediaType)
|
||||
// 1→2: image stays unmodified schema1
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "dir:"+input1Dir, schema2Registry+":f1t2")
|
||||
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", schema2Registry+":f1t2", "dir:"+destDir)
|
||||
verifyManifestMIMEType(c, destDir, manifest.DockerV2Schema1SignedMediaType)
|
||||
}
|
||||
|
||||
// Verify manifest in a dir: image at dir is expectedMIMEType.
|
||||
func verifyManifestMIMEType(c *check.C, dir string, expectedMIMEType string) {
|
||||
manifestBlob, err := ioutil.ReadFile(filepath.Join(dir, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
mimeType := manifest.GuessMIMEType(manifestBlob)
|
||||
c.Assert(mimeType, check.Equals, expectedMIMEType)
|
||||
}
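For manual debugging, the same check that verifyManifestMIMEType performs can be run as a tiny standalone program; a minimal sketch, assuming the github.com/containers/image/manifest package used above (the manifest path is a hypothetical example):

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/containers/image/manifest"
)

func main() {
	// A dir: image stores its raw manifest as manifest.json; GuessMIMEType
	// inspects the bytes and reports the detected manifest media type.
	blob, err := ioutil.ReadFile("/tmp/schema-conversion/dest2/manifest.json")
	if err != nil {
		panic(err)
	}
	fmt.Println(manifest.GuessMIMEType(blob))
}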
105 integration/fixtures/policy.json Normal file
@@ -0,0 +1,105 @@
{
    "default": [
        {
            "type": "reject"
        }
    ],
    "transports": {
        "docker": {
            "localhost:5555": [
                {
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "@keydir@/personal-pubkey.gpg"
                }
            ],
            "localhost:5000/myns/extension": [
                {
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "@keydir@/personal-pubkey.gpg"
                }
            ],
            "docker.io/openshift": [
                {
                    "type": "insecureAcceptAnything"
                }
            ]
        },
        "dir": {
            "/@dirpath@": [
                {
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "@keydir@/official-pubkey.gpg",
                    "signedIdentity": {
                        "type": "exactRepository",
                        "dockerRepository": "localhost:5000/myns/official"
                    }
                }
            ],
            "": [
                {
                    "type": "insecureAcceptAnything"
                }
            ]
        },
        "atomic": {
            "localhost:5006/myns/personal": [
                {
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "@keydir@/personal-pubkey.gpg"
                }
            ],
            "localhost:5006/myns/official": [
                {
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "@keydir@/official-pubkey.gpg"
                }
            ],
            "localhost:5006/myns/naming:test1": [
                {
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "@keydir@/official-pubkey.gpg"
                }
            ],
            "localhost:5006/myns/naming:naming": [
                {
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "@keydir@/official-pubkey.gpg",
                    "signedIdentity": {
                        "type": "exactRepository",
                        "dockerRepository": "localhost:5006/myns/official"
                    }
                }
            ],
            "localhost:5006/myns/cosigned:cosigned": [
                {
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "@keydir@/official-pubkey.gpg",
                    "signedIdentity": {
                        "type": "exactRepository",
                        "dockerRepository": "localhost:5006/myns/official"
                    }
                },
                {
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "@keydir@/personal-pubkey.gpg"
                }
            ],
            "localhost:5000/myns/extension": [
                {
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "@keydir@/personal-pubkey.gpg"
                }
            ]
        }
    }
}
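This fixture is consumed through containers/image's policy evaluation; below is a minimal sketch of loading such a file, assuming the signature package API of this period (NewPolicyFromFile, NewPolicyContext) and a hypothetical path where the @keydir@/@dirpath@ placeholders have already been substituted:

package main

import (
	"fmt"

	"github.com/containers/image/signature"
)

func main() {
	// Parse the policy; "reject" is the default, so only the listed
	// transport scopes accept images at all.
	policy, err := signature.NewPolicyFromFile("/tmp/policy.json")
	if err != nil {
		panic(err)
	}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		panic(err)
	}
	defer pc.Destroy()
	fmt.Println("policy loaded; a copy operation would consult pc per image")
}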
6 integration/fixtures/registries.yaml Normal file
@@ -0,0 +1,6 @@
docker:
    localhost:5555:
        sigstore: file://@sigstore@
    localhost:5555/public:
        sigstore-staging: file://@split-staging@
        sigstore: @split-read@
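The sigstore/sigstore-staging split means signatures are written to the staging location but read back from the sigstore URL. For orientation, a sketch of how a per-signature lookaside URL is composed under this layout; the exact path scheme is an assumption here, not taken from this commit:

package main

import (
	"fmt"
	"strings"
)

// lookasideURL composes <base>/<repository>@<digest with ':' replaced by
// '='>/signature-<index>, the layout containers/image uses for file://
// signature storage. Illustrative only.
func lookasideURL(base, repo, digest string, index int) string {
	return fmt.Sprintf("%s/%s@%s/signature-%d", base, repo, strings.Replace(digest, ":", "=", 1), index)
}

func main() {
	fmt.Println(lookasideURL("file:///var/lib/atomic/sigstore", "myns/personal", "sha256:1234", 1))
}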
Binary file not shown.
32 integration/fixtures/uncompressed-image-s1/manifest.json Normal file
@@ -0,0 +1,32 @@
{
    "schemaVersion": 1,
    "name": "nonempty",
    "tag": "nonempty",
    "architecture": "amd64",
    "fsLayers": [
        {
            "blobSum": "sha256:160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710"
        }
    ],
    "history": [
        {
            "v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"59c20544b2f4\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"container\":\"59c20544b2f4ad7a8639433bacb1ec215b7dad4a7bf1a83b5ab4679329a46c1d\",\"container_config\":{\"Hostname\":\"59c20544b2f4\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ADD file:14f49faade3db5e596826746d9ed3dfd658490c16c4d61d4886726153ad0591a in /\"],\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"created\":\"2016-09-19T18:23:54.9949213Z\",\"docker_version\":\"1.10.3\",\"id\":\"4c224eac5061bb85f523ca4e3316618fd7921a80fe94286979667b1edb8e1bdd\",\"os\":\"linux\"}"
        }
    ],
    "signatures": [
        {
            "header": {
                "jwk": {
                    "crv": "P-256",
                    "kid": "DGWZ:GAUM:WCOC:IMDL:D67M:CEI6:YTVH:M2CM:5HX4:FYDD:77OD:D3F7",
                    "kty": "EC",
                    "x": "eprZNqLO9mHZ4Z4GxefucEgov_1gwEi9lehpJR2suRo",
                    "y": "wIr2ucNg32ROfVCkR_8A5VbBJ-mFmsoIUVa6vt8lIxM"
                },
                "alg": "ES256"
            },
            "signature": "bvTLWW4YVFRjAanN1EJqwQw60fWSWJPxcGO3UZGFI_gyV6ucGdW4x7jyYL6g06sg925s9cy0wN1lw91CCFv4BA",
            "protected": "eyJmb3JtYXRMZW5ndGgiOjE0ODcsImZvcm1hdFRhaWwiOiJDbjBLIiwidGltZSI6IjIwMTYtMDktMTlUMTg6NDM6MzNaIn0"
        }
    ]
}
Binary file not shown.
@@ -0,0 +1 @@
{"architecture":"amd64","config":{"Hostname":"59c20544b2f4","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"59c20544b2f4ad7a8639433bacb1ec215b7dad4a7bf1a83b5ab4679329a46c1d","container_config":{"Hostname":"59c20544b2f4","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) ADD file:14f49faade3db5e596826746d9ed3dfd658490c16c4d61d4886726153ad0591a in /"],"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2016-09-19T18:23:54.9949213Z","docker_version":"1.10.3","history":[{"created":"2016-09-19T18:23:54.9949213Z","created_by":"/bin/sh -c #(nop) ADD file:14f49faade3db5e596826746d9ed3dfd658490c16c4d61d4886726153ad0591a in /"}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710"]}}
16 integration/fixtures/uncompressed-image-s2/manifest.json Normal file
@@ -0,0 +1,16 @@
{
    "schemaVersion": 2,
    "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
    "config": {
        "mediaType": "application/octet-stream",
        "size": 1272,
        "digest": "sha256:86ce150e65c72b30f885c261449d18b7c6832596916e7f654e08377b5a67b4ff"
    },
    "layers": [
        {
            "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
            "size": 2048,
            "digest": "sha256:160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710"
        }
    ]
}
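The digest fields above follow the Docker content-addressing rule: a blob's digest is the SHA-256 of its exact bytes, and the manifest's own digest is computed the same way over the serialized manifest. A minimal sketch (the file path is a hypothetical example):

package main

import (
	"crypto/sha256"
	"fmt"
	"io/ioutil"
)

func main() {
	// Computing "sha256:<hex>" over the exact serialized bytes reproduces
	// the digest a registry would report for this manifest.
	blob, err := ioutil.ReadFile("integration/fixtures/uncompressed-image-s2/manifest.json")
	if err != nil {
		panic(err)
	}
	fmt.Printf("sha256:%x\n", sha256.Sum256(blob))
}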
252 integration/openshift.go Normal file
@@ -0,0 +1,252 @@
package main

import (
	"bufio"
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	"github.com/docker/docker/pkg/homedir"
	"github.com/go-check/check"
)

var adminKUBECONFIG = map[string]string{
	"KUBECONFIG": "openshift.local.config/master/admin.kubeconfig",
}

// openshiftCluster is an OpenShift API master and integrated registry
// running on localhost.
type openshiftCluster struct {
	workingDir string
	processes  []*exec.Cmd // Processes to terminate on teardown; append to the end, terminate from end to the start.
}

// startOpenshiftCluster creates a new openshiftCluster.
// WARNING: This affects state in the user's home directory! Only run
// in an isolated test environment.
func startOpenshiftCluster(c *check.C) *openshiftCluster {
	cluster := &openshiftCluster{}

	dir, err := ioutil.TempDir("", "openshift-cluster")
	c.Assert(err, check.IsNil)
	cluster.workingDir = dir

	cluster.startMaster(c)
	cluster.prepareRegistryConfig(c)
	cluster.startRegistry(c)
	cluster.ocLoginToProject(c)
	cluster.dockerLogin(c)
	cluster.relaxImageSignerPermissions(c)

	return cluster
}

// clusterCmd creates an exec.Cmd in cluster.workingDir, with the current environment modified by env.
func (cluster *openshiftCluster) clusterCmd(env map[string]string, name string, args ...string) *exec.Cmd {
	cmd := exec.Command(name, args...)
	cmd.Dir = cluster.workingDir
	cmd.Env = os.Environ()
	for key, value := range env {
		cmd.Env = modifyEnviron(cmd.Env, key, value)
	}
	return cmd
}

// startMaster starts the OpenShift master (etcd+API server) and waits for it to be ready, or terminates on failure.
func (cluster *openshiftCluster) startMaster(c *check.C) {
	cmd := cluster.clusterCmd(nil, "openshift", "start", "master")
	cluster.processes = append(cluster.processes, cmd)
	stdout, err := cmd.StdoutPipe()
	c.Assert(err, check.IsNil)
	// Send both to the same pipe. This might cause the two streams to be mixed up,
	// but logging actually goes only to stderr - this primarily ensures we log any
	// unexpected output to stdout.
	cmd.Stderr = cmd.Stdout
	err = cmd.Start()
	c.Assert(err, check.IsNil)

	portOpen, terminatePortCheck := newPortChecker(c, 8443)
	defer func() {
		c.Logf("Terminating port check")
		terminatePortCheck <- true
	}()

	terminateLogCheck := make(chan bool, 1)
	logCheckFound := make(chan bool)
	go func() {
		defer func() {
			c.Logf("Log checker exiting")
		}()
		scanner := bufio.NewScanner(stdout)
		for scanner.Scan() {
			line := scanner.Text()
			c.Logf("Log line: %s", line)
			if strings.Contains(line, "Started Origin Controllers") {
				logCheckFound <- true
				return
				// FIXME? We stop reading from stdout; could this block the master?
			}
			// Note: we can block before we get here.
			select {
			case <-terminateLogCheck:
				c.Logf("terminated")
				return
			default:
				// Do not block here and read the next line.
			}
		}
		logCheckFound <- false
	}()
	defer func() {
		c.Logf("Terminating log check")
		terminateLogCheck <- true
	}()

	gotPortCheck := false
	gotLogCheck := false
	for !gotPortCheck || !gotLogCheck {
		c.Logf("Waiting for master")
		select {
		case <-portOpen:
			c.Logf("port check done")
			gotPortCheck = true
		case found := <-logCheckFound:
			c.Logf("log check done, found: %t", found)
			if !found {
				c.Fatal("log check done, success message not found")
			}
			gotLogCheck = true
		}
	}
	c.Logf("OK, master started!")
}

// prepareRegistryConfig creates a registry service account and a related k8s client configuration in ${cluster.workingDir}/openshift.local.registry.
func (cluster *openshiftCluster) prepareRegistryConfig(c *check.C) {
	// This partially mimics the objects created by (oadm registry), except that we run the
	// server directly as an ordinary process instead of a pod with an implicitly attached service account.
	saJSON := `{
		"apiVersion": "v1",
		"kind": "ServiceAccount",
		"metadata": {
			"name": "registry"
		}
	}`
	cmd := cluster.clusterCmd(adminKUBECONFIG, "oc", "create", "-f", "-")
	runExecCmdWithInput(c, cmd, saJSON)

	cmd = cluster.clusterCmd(adminKUBECONFIG, "oadm", "policy", "add-cluster-role-to-user", "system:registry", "-z", "registry")
	out, err := cmd.CombinedOutput()
	c.Assert(err, check.IsNil, check.Commentf("%s", string(out)))
	c.Assert(string(out), check.Equals, "cluster role \"system:registry\" added: \"registry\"\n")

	cmd = cluster.clusterCmd(adminKUBECONFIG, "oadm", "create-api-client-config", "--client-dir=openshift.local.registry", "--basename=openshift-registry", "--user=system:serviceaccount:default:registry")
	out, err = cmd.CombinedOutput()
	c.Assert(err, check.IsNil, check.Commentf("%s", string(out)))
	c.Assert(string(out), check.Equals, "")
}

// startRegistryProcess starts the OpenShift registry with configPath on port, waits for it to be ready, and returns the process object, or terminates on failure.
func (cluster *openshiftCluster) startRegistryProcess(c *check.C, port int, configPath string) *exec.Cmd {
	cmd := cluster.clusterCmd(map[string]string{
		"KUBECONFIG":          "openshift.local.registry/openshift-registry.kubeconfig",
		"DOCKER_REGISTRY_URL": fmt.Sprintf("127.0.0.1:%d", port),
	}, "dockerregistry", configPath)
	consumeAndLogOutputs(c, fmt.Sprintf("registry-%d", port), cmd)
	err := cmd.Start()
	c.Assert(err, check.IsNil)

	portOpen, terminatePortCheck := newPortChecker(c, port)
	defer func() {
		terminatePortCheck <- true
	}()
	c.Logf("Waiting for registry to start")
	<-portOpen
	c.Logf("OK, Registry port open")

	return cmd
}

// startRegistry starts the OpenShift registry and waits for it to be ready, or terminates on failure.
func (cluster *openshiftCluster) startRegistry(c *check.C) {
	// Our “primary” registry
	cluster.processes = append(cluster.processes, cluster.startRegistryProcess(c, 5000, "/atomic-registry-config.yml"))

	// A registry configured with acceptschema2:false
	schema1Config := fileFromFixture(c, "/atomic-registry-config.yml", map[string]string{
		"addr: :5000":              "addr: :5005",
		"rootdirectory: /registry": "rootdirectory: /registry-schema1",
		// The default configuration currently already contains acceptschema2: false
	})
	// Make sure the configuration contains "acceptschema2: false", because eventually it will be enabled upstream and this function will need to be updated.
	configContents, err := ioutil.ReadFile(schema1Config)
	c.Assert(err, check.IsNil)
	c.Assert(string(configContents), check.Matches, "(?s).*acceptschema2: false.*")
	cluster.processes = append(cluster.processes, cluster.startRegistryProcess(c, 5005, schema1Config))

	// A registry configured with acceptschema2:true
	schema2Config := fileFromFixture(c, "/atomic-registry-config.yml", map[string]string{
		"addr: :5000":              "addr: :5006",
		"rootdirectory: /registry": "rootdirectory: /registry-schema2",
		"acceptschema2: false":     "acceptschema2: true",
	})
	cluster.processes = append(cluster.processes, cluster.startRegistryProcess(c, 5006, schema2Config))
}

// ocLoginToProject runs (oc login) and (oc new-project) on the cluster, or terminates on failure.
func (cluster *openshiftCluster) ocLoginToProject(c *check.C) {
	c.Logf("oc login")
	cmd := cluster.clusterCmd(nil, "oc", "login", "--certificate-authority=openshift.local.config/master/ca.crt", "-u", "myuser", "-p", "mypw", "https://localhost:8443")
	out, err := cmd.CombinedOutput()
	c.Assert(err, check.IsNil, check.Commentf("%s", out))
	c.Assert(string(out), check.Matches, "(?s).*Login successful.*") // (?s) : '.' will also match newlines

	outString := combinedOutputOfCommand(c, "oc", "new-project", "myns")
	c.Assert(outString, check.Matches, `(?s).*Now using project "myns".*`) // (?s) : '.' will also match newlines
}

// dockerLogin simulates (docker login) to the cluster, or terminates on failure.
// We do not run (docker login) directly, because that requires a running daemon and a docker package.
func (cluster *openshiftCluster) dockerLogin(c *check.C) {
	dockerDir := filepath.Join(homedir.Get(), ".docker")
	err := os.Mkdir(dockerDir, 0700)
	c.Assert(err, check.IsNil)

	out := combinedOutputOfCommand(c, "oc", "config", "view", "-o", "json", "-o", "jsonpath={.users[*].user.token}")
	c.Logf("oc config value: %s", out)
	authValue := base64.StdEncoding.EncodeToString([]byte("unused:" + out))
	auths := []string{}
	for _, port := range []int{5000, 5005, 5006} {
		auths = append(auths, fmt.Sprintf(`"localhost:%d": {
			"auth": "%s",
			"email": "unused"
		}`, port, authValue))
	}
	configJSON := `{"auths": {` + strings.Join(auths, ",") + `}}`
	err = ioutil.WriteFile(filepath.Join(dockerDir, "config.json"), []byte(configJSON), 0600)
	c.Assert(err, check.IsNil)
}

// relaxImageSignerPermissions opens up the system:image-signer permissions so that
// anyone can work with signatures.
// FIXME: This also allows anyone to DoS anyone else; this design is really not all
// that workable, but it is the best we can do for now.
func (cluster *openshiftCluster) relaxImageSignerPermissions(c *check.C) {
	cmd := cluster.clusterCmd(adminKUBECONFIG, "oadm", "policy", "add-cluster-role-to-group", "system:image-signer", "system:authenticated")
	out, err := cmd.CombinedOutput()
	c.Assert(err, check.IsNil, check.Commentf("%s", string(out)))
	c.Assert(string(out), check.Equals, "cluster role \"system:image-signer\" added: \"system:authenticated\"\n")
}

// tearDown stops the cluster services and deletes only some (!) of the state.
func (cluster *openshiftCluster) tearDown(c *check.C) {
	for i := len(cluster.processes) - 1; i >= 0; i-- {
		cluster.processes[i].Process.Kill()
	}
	if cluster.workingDir != "" {
		os.RemoveAll(cluster.workingDir)
	}
}
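For reference, dockerLogin above ends up writing a ~/.docker/config.json of roughly this shape; the auth value is the base64 of "unused:<token>", shown here with a placeholder token:

{
    "auths": {
        "localhost:5000": {
            "auth": "dW51c2VkOjx0b2tlbj4=",
            "email": "unused"
        },
        "localhost:5005": {
            "auth": "dW51c2VkOjx0b2tlbj4=",
            "email": "unused"
        },
        "localhost:5006": {
            "auth": "dW51c2VkOjx0b2tlbj4=",
            "email": "unused"
        }
    }
}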
40 integration/openshift_shell_test.go Normal file
@@ -0,0 +1,40 @@
// +build openshift_shell

package main

import (
	"os"
	"os/exec"

	"github.com/go-check/check"
)

/*
TestRunShell is not really a test; it is a convenient way to use the registry setup code
in openshift.go and CopySuite to get an interactive environment for experimentation.

To use it, run:
	sudo make shell
to start a container, then within the container:
	SKOPEO_CONTAINER_TESTS=1 PS1='nested> ' go test -tags openshift_shell -timeout=24h ./integration -v -check.v -check.vv -check.f='CopySuite.TestRunShell'

An example of what can be done within the container:
	cd ..; make binary-local install
	./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://busybox:latest atomic:localhost:5000/myns/personal:personal
	oc get istag personal:personal -o json
	curl -L -v 'http://localhost:5000/v2/'
	cat ~/.docker/config.json
	curl -L -v 'http://localhost:5000/openshift/token?scope=repository:myns/personal:pull' --header 'Authorization: Basic $auth_from_docker'
	curl -L -v 'http://localhost:5000/v2/myns/personal/manifests/personal' --header 'Authorization: Bearer $token_from_oauth'
	curl -L -v 'http://localhost:5000/extensions/v2/myns/personal/signatures/$manifest_digest' --header 'Authorization: Bearer $token_from_oauth'
*/
func (s *CopySuite) TestRunShell(c *check.C) {
	cmd := exec.Command("bash", "-i")
	tty, err := os.OpenFile("/dev/tty", os.O_RDWR, 0)
	c.Assert(err, check.IsNil)
	cmd.Stdin = tty
	cmd.Stdout = tty
	cmd.Stderr = tty
	err = cmd.Run()
	c.Assert(err, check.IsNil)
}
@@ -13,23 +13,10 @@ import (
 )
 
 const (
-	binaryV1        = "docker-registry"
 	binaryV2        = "registry-v2"
 	binaryV2Schema1 = "registry-v2-schema1"
 )
 
-type testRegistryV1 struct {
-	cmd *exec.Cmd
-	url string
-	dir string
-}
-
-func setupRegistryV1At(c *check.C, url string, auth bool) *testRegistryV1 {
-	return &testRegistryV1{
-		url: url,
-	}
-}
-
 type testRegistryV2 struct {
 	cmd *exec.Cmd
 	url string
@@ -44,7 +31,7 @@ func setupRegistryV2At(c *check.C, url string, auth, schema1 bool) *testRegistryV2 {
 	c.Assert(err, check.IsNil)
 
 	// Wait for registry to be ready to serve requests.
-	for i := 0; i != 5; i++ {
+	for i := 0; i != 50; i++ {
 		if err = reg.Ping(); err == nil {
 			break
 		}
@@ -67,6 +54,8 @@ loglevel: debug
 storage:
     filesystem:
         rootdirectory: %s
+    delete:
+        enabled: true
 http:
     addr: %s
 %s`
@@ -107,6 +96,7 @@ http:
 	}
 
 	cmd := exec.Command(binary, confPath)
+	consumeAndLogOutputs(c, fmt.Sprintf("registry-%s", url), cmd)
 	if err := cmd.Start(); err != nil {
 		os.RemoveAll(tmp)
 		if os.IsNotExist(err) {
@@ -2,12 +2,13 @@ package main
 
 import (
 	"errors"
-	"io"
+	"fmt"
 	"io/ioutil"
 	"os"
 	"os/exec"
 	"strings"
 
+	"github.com/containers/image/signature"
 	"github.com/go-check/check"
 )
 
@@ -35,27 +36,7 @@ func findFingerprint(lineBytes []byte) (string, error) {
 	return "", errors.New("No fingerprint found")
 }
 
-// ConsumeAndLogOutput takes (f, err) from an exec.*Pipe(), and causes all output to it to be logged to c.
-func ConsumeAndLogOutput(c *check.C, id string, f io.ReadCloser, err error) {
-	c.Assert(err, check.IsNil)
-	go func() {
-		defer func() {
-			f.Close()
-			c.Logf("Output %s: Closed", id)
-		}()
-		buf := make([]byte, 0, 1024)
-		for {
-			c.Logf("Output %s: waiting", id)
-			n, err := f.Read(buf)
-			c.Logf("Output %s: got %d,%#v: %#v", id, n, err, buf[:n])
-			if n <= 0 {
-				break
-			}
-		}
-	}()
-}
-
-func (s *SigningSuite) SetUpTest(c *check.C) {
+func (s *SigningSuite) SetUpSuite(c *check.C) {
 	_, err := exec.LookPath(skopeoBinary)
 	c.Assert(err, check.IsNil)
 
@@ -63,21 +44,7 @@ func (s *SigningSuite) SetUpTest(c *check.C) {
 	c.Assert(err, check.IsNil)
 	os.Setenv("GNUPGHOME", s.gpgHome)
 
-	cmd := exec.Command(gpgBinary, "--homedir", s.gpgHome, "--batch", "--gen-key")
-	stdin, err := cmd.StdinPipe()
-	c.Assert(err, check.IsNil)
-	stdout, err := cmd.StdoutPipe()
-	ConsumeAndLogOutput(c, "gen-key stdout", stdout, err)
-	stderr, err := cmd.StderrPipe()
-	ConsumeAndLogOutput(c, "gen-key stderr", stderr, err)
-	err = cmd.Start()
-	c.Assert(err, check.IsNil)
-	_, err = stdin.Write([]byte("Key-Type: RSA\nName-Real: Testing user\n%commit\n"))
-	c.Assert(err, check.IsNil)
-	err = stdin.Close()
-	c.Assert(err, check.IsNil)
-	err = cmd.Wait()
-	c.Assert(err, check.IsNil)
+	runCommandWithInput(c, "Key-Type: RSA\nName-Real: Testing user\n%commit\n", gpgBinary, "--homedir", s.gpgHome, "--batch", "--gen-key")
 
 	lines, err := exec.Command(gpgBinary, "--homedir", s.gpgHome, "--with-colons", "--no-permission-warning", "--fingerprint").Output()
 	c.Assert(err, check.IsNil)
@@ -85,7 +52,7 @@ func (s *SigningSuite) SetUpTest(c *check.C) {
 	c.Assert(err, check.IsNil)
 }
 
-func (s *SigningSuite) TearDownTest(c *check.C) {
+func (s *SigningSuite) TearDownSuite(c *check.C) {
 	if s.gpgHome != "" {
 		err := os.RemoveAll(s.gpgHome)
 		c.Assert(err, check.IsNil)
@@ -96,19 +63,23 @@ func (s *SigningSuite) TearDownTest(c *check.C) {
 }
 
 func (s *SigningSuite) TestSignVerifySmoke(c *check.C) {
+	mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
+	c.Assert(err, check.IsNil)
+	defer mech.Close()
+	if err := mech.SupportsSigning(); err != nil { // FIXME? Test that verification and policy enforcement works, using signatures from fixtures
+		c.Skip(fmt.Sprintf("Signing not supported: %v", err))
+	}
 
 	manifestPath := "fixtures/image.manifest.json"
 	dockerReference := "testing/smoketest"
 
 	sigOutput, err := ioutil.TempFile("", "sig")
 	c.Assert(err, check.IsNil)
 	defer os.Remove(sigOutput.Name())
-	out, err := exec.Command(skopeoBinary, "standalone-sign", "-o", sigOutput.Name(),
-		manifestPath, dockerReference, s.fingerprint).CombinedOutput()
-	c.Assert(err, check.IsNil, check.Commentf("%s", out))
-	c.Assert(string(out), check.Equals, "")
+	assertSkopeoSucceeds(c, "^$", "standalone-sign", "-o", sigOutput.Name(),
+		manifestPath, dockerReference, s.fingerprint)
 
-	out, err = exec.Command(skopeoBinary, "standalone-verify", manifestPath,
-		dockerReference, s.fingerprint, sigOutput.Name()).CombinedOutput()
-	c.Assert(err, check.IsNil, check.Commentf("%s", out))
-	c.Assert(string(out), check.Equals, "Signature verified, digest "+TestImageManifestDigest+"\n")
+	expected := fmt.Sprintf("^Signature verified, digest %s\n$", TestImageManifestDigest)
+	assertSkopeoSucceeds(c, expected, "standalone-verify", manifestPath,
+		dockerReference, s.fingerprint, sigOutput.Name())
 }
176 integration/utils.go Normal file
@@ -0,0 +1,176 @@
package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"net"
	"os/exec"
	"strings"
	"time"

	"github.com/go-check/check"
)

const skopeoBinary = "skopeo"

// consumeAndLogOutputStream takes (f, err) from an exec.*Pipe(), and causes all output to it to be logged to c.
func consumeAndLogOutputStream(c *check.C, id string, f io.ReadCloser, err error) {
	c.Assert(err, check.IsNil)
	go func() {
		defer func() {
			f.Close()
			c.Logf("Output %s: Closed", id)
		}()
		buf := make([]byte, 1024)
		for {
			c.Logf("Output %s: waiting", id)
			n, err := f.Read(buf)
			c.Logf("Output %s: got %d,%#v: %s", id, n, err, strings.TrimSuffix(string(buf[:n]), "\n"))
			if n <= 0 {
				break
			}
		}
	}()
}

// consumeAndLogOutputs causes all output to stdout and stderr from an *exec.Cmd to be logged to c.
func consumeAndLogOutputs(c *check.C, id string, cmd *exec.Cmd) {
	stdout, err := cmd.StdoutPipe()
	consumeAndLogOutputStream(c, id+" stdout", stdout, err)
	stderr, err := cmd.StderrPipe()
	consumeAndLogOutputStream(c, id+" stderr", stderr, err)
}

// combinedOutputOfCommand runs a command as if exec.Command().CombinedOutput(), verifies that the exit status is 0, and returns the output,
// or terminates c on failure.
func combinedOutputOfCommand(c *check.C, name string, args ...string) string {
	c.Logf("Running %s %s", name, strings.Join(args, " "))
	out, err := exec.Command(name, args...).CombinedOutput()
	c.Assert(err, check.IsNil, check.Commentf("%s", out))
	return string(out)
}

// assertSkopeoSucceeds runs a skopeo command as if exec.Command().CombinedOutput, verifies that the exit status is 0,
// and optionally that the output matches a multi-line regexp if it is nonempty;
// or terminates c on failure.
func assertSkopeoSucceeds(c *check.C, regexp string, args ...string) {
	c.Logf("Running %s %s", skopeoBinary, strings.Join(args, " "))
	out, err := exec.Command(skopeoBinary, args...).CombinedOutput()
	c.Assert(err, check.IsNil, check.Commentf("%s", out))
	if regexp != "" {
		c.Assert(string(out), check.Matches, "(?s)"+regexp) // (?s) : '.' will also match newlines
	}
}

// assertSkopeoFails runs a skopeo command as if exec.Command().CombinedOutput, verifies that the exit status is not 0,
// and that the output matches a multi-line regexp;
// or terminates c on failure.
func assertSkopeoFails(c *check.C, regexp string, args ...string) {
	c.Logf("Running %s %s", skopeoBinary, strings.Join(args, " "))
	out, err := exec.Command(skopeoBinary, args...).CombinedOutput()
	c.Assert(err, check.NotNil, check.Commentf("%s", out))
	c.Assert(string(out), check.Matches, "(?s)"+regexp) // (?s) : '.' will also match newlines
}

// runCommandWithInput runs a command as if exec.Command(), sending it the input to stdin,
// and verifies that the exit status is 0, or terminates c on failure.
func runCommandWithInput(c *check.C, input string, name string, args ...string) {
	cmd := exec.Command(name, args...)
	runExecCmdWithInput(c, cmd, input)
}

// runExecCmdWithInput runs an exec.Cmd, sending it the input to stdin,
// and verifies that the exit status is 0, or terminates c on failure.
func runExecCmdWithInput(c *check.C, cmd *exec.Cmd, input string) {
	c.Logf("Running %s %s", cmd.Path, strings.Join(cmd.Args, " "))
	consumeAndLogOutputs(c, cmd.Path+" "+strings.Join(cmd.Args, " "), cmd)
	stdin, err := cmd.StdinPipe()
	c.Assert(err, check.IsNil)
	err = cmd.Start()
	c.Assert(err, check.IsNil)
	_, err = stdin.Write([]byte(input))
	c.Assert(err, check.IsNil)
	err = stdin.Close()
	c.Assert(err, check.IsNil)
	err = cmd.Wait()
	c.Assert(err, check.IsNil)
}

// isPortOpen returns true iff the specified port on localhost is open.
func isPortOpen(port int) bool {
	conn, err := net.DialTCP("tcp", nil, &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: port})
	if err != nil {
		return false
	}
	conn.Close()
	return true
}

// newPortChecker sets up a portOpen channel which will receive true after the specified port is open.
// The checking can be aborted by sending a value to the terminate channel, which the caller should
// always do using
//	defer func() {terminate <- true}()
func newPortChecker(c *check.C, port int) (portOpen <-chan bool, terminate chan<- bool) {
	portOpenBidi := make(chan bool)
	// Buffered, so that sending a terminate request after the goroutine has exited does not block.
	terminateBidi := make(chan bool, 1)

	go func() {
		defer func() {
			c.Logf("Port checker for port %d exiting", port)
		}()
		for {
			c.Logf("Checking for port %d...", port)
			if isPortOpen(port) {
				c.Logf("Port %d open", port)
				portOpenBidi <- true
				return
			}
			c.Logf("Sleeping for port %d", port)
			sleepChan := time.After(100 * time.Millisecond)
			select {
			case <-sleepChan: // Try again
				c.Logf("Sleeping for port %d done, will retry", port)
			case <-terminateBidi:
				c.Logf("Check for port %d terminated", port)
				return
			}
		}
	}()
	return portOpenBidi, terminateBidi
}

// modifyEnviron modifies an os.Environ()-like list of name=value assignments to set name to value.
func modifyEnviron(env []string, name, value string) []string {
	prefix := name + "="
	res := []string{}
	for _, e := range env {
		if !strings.HasPrefix(e, prefix) {
			res = append(res, e)
		}
	}
	return append(res, prefix+value)
}

// fileFromFixture applies edits to inputPath and returns a path to the temporary file.
// Callers should defer os.Remove(the_returned_path)
func fileFromFixture(c *check.C, inputPath string, edits map[string]string) string {
	contents, err := ioutil.ReadFile(inputPath)
	c.Assert(err, check.IsNil)
	for template, value := range edits {
		updated := bytes.Replace(contents, []byte(template), []byte(value), -1)
		c.Assert(bytes.Equal(updated, contents), check.Equals, false, check.Commentf("Replacing %s in %#v failed", template, string(contents))) // Verify that the template has matched something and we are not silently ignoring it.
		contents = updated
	}

	file, err := ioutil.TempFile("", "policy.json")
	c.Assert(err, check.IsNil)
	path := file.Name()

	_, err = file.Write(contents)
	c.Assert(err, check.IsNil)
	err = file.Close()
	c.Assert(err, check.IsNil)
	return path
}
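As a usage note, newPortChecker pairs a result channel with a termination channel; a hypothetical caller (not part of this commit, and assuming this package's imports) would typically combine it with a timeout:

func (s *CopySuite) testWaitForRegistrySketch(c *check.C) {
	// Hypothetical illustration of the newPortChecker contract.
	portOpen, terminate := newPortChecker(c, 5000)
	// Always unblock the checker goroutine, even on early return.
	defer func() { terminate <- true }()
	select {
	case <-portOpen:
		c.Logf("port 5000 open")
	case <-time.After(30 * time.Second):
		c.Fatal("timed out waiting for port 5000")
	}
}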
118 man1/skopeo.1
@@ -1,118 +0,0 @@
.\" To review this file formatted
.\" groff -man -Tascii skopeo.1
.\"
.de FN
\fI\|\\$1\|\fP
..
.TH "skopeo" "1" "2016-04-21" "Linux" "Linux Programmer's Manual"
.SH NAME
skopeo \(em Inspect Docker images and repositories on registries
.SH SYNOPSIS
\fBskopeo copy\fR [\fB--sign-by=\fRkey-ID] source-location destination-location
.PP
\fBskopeo inspect\fR image-name [\fB--raw\fR]
.PP
\fBskopeo layers\fR image-name
.PP
\fBskopeo standalone-sign\fR manifest docker-reference key-fingerprint \%\fB--output\fR|\fB-o\fR signature
.PP
\fBskopeo standalone-verify\fR manifest docker-reference key-fingerprint \%signature
.PP
\fBskopeo help\fR [command]
.SH DESCRIPTION
\fBskopeo\fR is a command line utility which is able to inspect a repository on a Docker registry and fetch image
layers. It fetches the repository's manifest and is able to show you a \fBdocker inspect\fR-like json output about a
whole repository or a tag. This tool, in contrast to \fBdocker inspect\fR, helps you gather useful information about a
repository or a tag without requiring you to run \fBdocker pull\fR - e.g.: which tags are available for the given
repository? Which labels does the image have?
.SH OPTIONS
.B "--debug"
enable debug output
.PP
.B "--username"
Username to use to authenticate to the given registry
.PP
.B --password
Password to use to authenticate to the given registry
.PP
.B "--cert-path"
Path to certificates to use to authenticate to the given registry (cert.pem, key.pem)
.PP
.B "--tls-verify"
Whether to verify certificates or not
.PP
.B "--help, -h"
Show help
.PP
.B "--version, -v"
print the version number
.SH COMMANDS
.TP
.B copy
Copy an image (manifest, filesystem layers, signatures) from one location to another.
.sp
.B source-location
and
.B destination-location
can be \fBdocker://\fRdocker-reference, \fBdir:\fRlocal-path, or \fBatomic:\fRimagestream-name\fB:\fRtag .
.sp
\fB\-\-sign\-by=\fRkey-id
Add a signature by the specified key ID for the image name corresponding to \fBdestination-location\fR.
Existing signatures, if any, are preserved as well.
.TP
.B inspect
Return low-level information on images in a registry
.sp
.B image-name
name of image to retrieve information about
.br
.B "--raw"
output raw manifest, default is to format in JSON
.TP
.B layers
Get image layers
.sp
.B image-name
name of the image whose layers to retrieve
.TP
.B standalone-sign
Create a signature using local files.
This is primarily a debugging tool and should not be part of your normal operational workflow.
.sp
.B manifest
path to file containing manifest of image
.br
.B docker-reference
docker reference of blob to be signed
.br
.B key-fingerprint
key identity to use for signing
.br
.B "--output, -o"
write signature to given file
.TP
.B standalone-verify
Verify a signature using local files; the digest will be printed on success.
This is primarily a debugging tool and should not be part of your normal operational workflow.
.sp
.B manifest
Path to file containing manifest of image
.br
.B docker-reference
docker reference of signed blob
.br
.B key-fingerprint
key identity to use for verification
.br
.B signature
Path to file containing signature
.TP
.B help
show help for \fBskopeo\fR
.SH AUTHORS
Antonio Murdaca <runcom@redhat.com>
.br
Miloslav Trmac <mitr@redhat.com>
.br
Jhon Honce <jhonce@redhat.com>
@@ -1,398 +0,0 @@
|
||||
package openshift
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/projectatomic/skopeo/docker"
|
||||
"github.com/projectatomic/skopeo/docker/utils"
|
||||
"github.com/projectatomic/skopeo/types"
|
||||
"github.com/projectatomic/skopeo/version"
|
||||
)
|
||||
|
||||
// openshiftClient is configuration for dealing with a single image stream, for reading or writing.
|
||||
type openshiftClient struct {
|
||||
// Values from Kubernetes configuration
|
||||
baseURL *url.URL
|
||||
httpClient *http.Client
|
||||
bearerToken string // "" if not used
|
||||
username string // "" if not used
|
||||
password string // if username != ""
|
||||
// Values specific to this image
|
||||
namespace string
|
||||
stream string
|
||||
tag string
|
||||
}
|
||||
|
||||
// FIXME: Is imageName like this a good way to refer to OpenShift images?
|
||||
var imageNameRegexp = regexp.MustCompile("^([^:/]*)/([^:/]*):([^:/]*)$")
|
||||
|
||||
// newOpenshiftClient creates a new openshiftClient for the specified image.
|
||||
func newOpenshiftClient(imageName string) (*openshiftClient, error) {
|
||||
// Overall, this is modelled on openshift/origin/pkg/cmd/util/clientcmd.New().ClientConfig() and openshift/origin/pkg/client.
|
||||
cmdConfig := defaultClientConfig()
|
||||
logrus.Debugf("cmdConfig: %#v", cmdConfig)
|
||||
restConfig, err := cmdConfig.ClientConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// REMOVED: SetOpenShiftDefaults (values are not overridable in config files, so hard-coded these defaults.)
|
||||
logrus.Debugf("restConfig: %#v", restConfig)
|
||||
baseURL, httpClient, err := restClientFor(restConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logrus.Debugf("URL: %#v", *baseURL)
|
||||
|
||||
m := imageNameRegexp.FindStringSubmatch(imageName)
|
||||
if m == nil || len(m) != 4 {
|
||||
return nil, fmt.Errorf("Invalid image reference %s, %#v", imageName, m)
|
||||
}
|
||||
|
||||
return &openshiftClient{
|
||||
baseURL: baseURL,
|
||||
httpClient: httpClient,
|
||||
bearerToken: restConfig.BearerToken,
|
||||
username: restConfig.Username,
|
||||
password: restConfig.Password,
|
||||
|
||||
namespace: m[1],
|
||||
stream: m[2],
|
||||
tag: m[3],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object.
|
||||
func (c *openshiftClient) doRequest(method, path string, requestBody []byte) ([]byte, error) {
|
||||
url := *c.baseURL
|
||||
url.Path = path
|
||||
var requestBodyReader io.Reader
|
||||
if requestBody != nil {
|
||||
logrus.Debugf("Will send body: %s", requestBody)
|
||||
requestBodyReader = bytes.NewReader(requestBody)
|
||||
}
|
||||
req, err := http.NewRequest(method, url.String(), requestBodyReader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(c.bearerToken) != 0 {
|
||||
req.Header.Set("Authorization", "Bearer "+c.bearerToken)
|
||||
} else if len(c.username) != 0 {
|
||||
req.SetBasicAuth(c.username, c.password)
|
||||
}
|
||||
req.Header.Set("Accept", "application/json, */*")
|
||||
req.Header.Set("User-Agent", fmt.Sprintf("skopeo/%s", version.Version))
|
||||
if requestBody != nil {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
|
||||
logrus.Debugf("%s %s", method, url)
|
||||
res, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
body, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logrus.Debugf("Got body: %s", body)
|
||||
// FIXME: Just throwing this useful information away only to try to guess later...
|
||||
logrus.Debugf("Got content-type: %s", res.Header.Get("Content-Type"))
|
||||
|
||||
var status status
|
||||
statusValid := false
|
||||
if err := json.Unmarshal(body, &status); err == nil && len(status.Status) > 0 {
|
||||
statusValid = true
|
||||
}
|
||||
|
||||
switch {
|
||||
case res.StatusCode == http.StatusSwitchingProtocols: // FIXME?! No idea why this weird case exists in k8s.io/kubernetes/pkg/client/restclient.
|
||||
if statusValid && status.Status != "Success" {
|
||||
return nil, errors.New(status.Message)
|
||||
}
|
||||
case res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusPartialContent:
|
||||
// OK.
|
||||
default:
|
||||
if statusValid {
|
||||
return nil, errors.New(status.Message)
|
||||
}
|
||||
return nil, fmt.Errorf("HTTP error: status code: %d, body: %s", res.StatusCode, string(body))
|
||||
}
|
||||
|
||||
return body, nil
|
||||
}
|
||||
|
||||
// canonicalDockerReference returns a canonical reference we use for signing OpenShift images.
|
||||
// FIXME: This is, strictly speaking, a namespace conflict with images placed in a Docker registry running on the same host.
|
||||
// Do we need to do something else, perhaps disambiguate (port number?) or namespace Docker and OpenShift separately?
|
||||
func (c *openshiftClient) canonicalDockerReference() string {
|
||||
return fmt.Sprintf("%s/%s/%s:%s", c.baseURL.Host, c.namespace, c.stream, c.tag)
|
||||
}
|
||||
|
||||
// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use;
|
||||
// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside.
|
||||
func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
|
||||
parts := strings.SplitN(ref, "/", 2)
|
||||
if len(parts) != 2 {
|
||||
return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref)
|
||||
}
|
||||
// Sanity check that the reference is at least plausibly similar, i.e. uses the hard-coded port we expect.
|
||||
if !strings.HasSuffix(parts[0], ":5000") {
|
||||
return "", fmt.Errorf("Invalid format of docker reference %s: expecting port 5000", ref)
|
||||
}
|
||||
return c.dockerRegistryHostPart() + "/" + parts[1], nil
|
||||
}
|
||||
|
||||
// dockerRegistryHostPart returns the host:port of the embedded Docker Registry API endpoint
|
||||
// FIXME: There seems to be no way to discover the correct:host port using the API, so hard-code our knowledge
|
||||
// about how the OpenShift Atomic Registry is configured, per examples/atomic-registry/run.sh:
|
||||
// -p OPENSHIFT_OAUTH_PROVIDER_URL=https://${INSTALL_HOST}:8443,COCKPIT_KUBE_URL=https://${INSTALL_HOST},REGISTRY_HOST=${INSTALL_HOST}:5000
|
||||
func (c *openshiftClient) dockerRegistryHostPart() string {
|
||||
return strings.SplitN(c.baseURL.Host, ":", 2)[0] + ":5000"
|
||||
}
|
||||
|
||||
type openshiftImageSource struct {
|
||||
client *openshiftClient
|
||||
// Values specific to this image
|
||||
certPath string // Only for parseDockerImageSource
|
||||
tlsVerify bool // Only for parseDockerImageSource
|
||||
// State
|
||||
docker types.ImageSource // The Docker Registry endpoint, or nil if not resolved yet
|
||||
imageStreamImageName string // Resolved image identifier, or "" if not known yet
|
||||
}
|
||||
|
||||
// NewOpenshiftImageSource creates a new ImageSource for the specified image and connection specification.
|
||||
func NewOpenshiftImageSource(imageName, certPath string, tlsVerify bool) (types.ImageSource, error) {
|
||||
client, err := newOpenshiftClient(imageName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &openshiftImageSource{
|
||||
client: client,
|
||||
certPath: certPath,
|
||||
tlsVerify: tlsVerify,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// IntendedDockerReference returns the full, unambiguous, Docker reference for this image, _as specified by the user_
|
||||
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
|
||||
// May be "" if unknown.
|
||||
func (s *openshiftImageSource) IntendedDockerReference() string {
|
||||
return s.client.canonicalDockerReference()
|
||||
}
|
||||
|
||||
func (s *openshiftImageSource) GetManifest(mimetypes []string) ([]byte, string, error) {
|
||||
if err := s.ensureImageIsResolved(); err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return s.docker.GetManifest(mimetypes)
|
||||
}
|
||||
|
||||
func (s *openshiftImageSource) GetLayer(digest string) (io.ReadCloser, error) {
|
||||
if err := s.ensureImageIsResolved(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s.docker.GetLayer(digest)
|
||||
}
|
||||
|
||||
func (s *openshiftImageSource) GetSignatures() ([][]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
|
||||
func (s *openshiftImageSource) ensureImageIsResolved() error {
|
||||
if s.docker != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// FIXME: validate components per validation.IsValidPathSegmentName?
|
||||
path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.namespace, s.client.stream)
|
||||
body, err := s.client.doRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Note: This does absolutely no kind/version checking or conversions.
|
||||
var is imageStream
|
||||
if err := json.Unmarshal(body, &is); err != nil {
|
||||
return err
|
||||
}
|
||||
var te *tagEvent
|
||||
for _, tag := range is.Status.Tags {
|
||||
if tag.Tag != s.client.tag {
|
||||
continue
|
||||
}
|
||||
if len(tag.Items) > 0 {
|
||||
te = &tag.Items[0]
|
||||
break
|
||||
}
|
||||
}
|
||||
if te == nil {
|
||||
return fmt.Errorf("No matching tag found")
|
||||
}
|
||||
logrus.Debugf("tag event %#v", te)
|
||||
dockerRef, err := s.client.convertDockerImageReference(te.DockerImageReference)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Debugf("Resolved reference %#v", dockerRef)
|
||||
d, err := docker.NewDockerImageSource(dockerRef, s.certPath, s.tlsVerify)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.docker = d
|
||||
s.imageStreamImageName = te.Image
|
||||
return nil
|
||||
}
|
||||
|
||||
type openshiftImageDestination struct {
|
||||
client *openshiftClient
|
||||
docker types.ImageDestination // The Docker Registry endpoint
|
||||
}
|
||||
|
||||
// NewOpenshiftImageDestination creates a new ImageDestination for the specified image and connection specification.
|
||||
func NewOpenshiftImageDestination(imageName, certPath string, tlsVerify bool) (types.ImageDestination, error) {
|
||||
client, err := newOpenshiftClient(imageName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match,
|
||||
// i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know
|
||||
// the manifest digest at this point.
|
||||
dockerRef := fmt.Sprintf("%s/%s/%s:%s", client.dockerRegistryHostPart(), client.namespace, client.stream, client.tag)
|
||||
docker, err := docker.NewDockerImageDestination(dockerRef, certPath, tlsVerify)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &openshiftImageDestination{
|
||||
client: client,
|
||||
docker: docker,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *openshiftImageDestination) CanonicalDockerReference() (string, error) {
|
||||
return d.client.canonicalDockerReference(), nil
|
||||
}
|
||||
|
||||
func (d *openshiftImageDestination) PutManifest(manifest []byte) error {
|
||||
// Note: This does absolutely no kind/version checking or conversions.
|
||||
manifestDigest, err := utils.ManifestDigest(manifest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// FIXME: We can't do what respositorymiddleware.go does because we don't know the internal address. Does any of this matter?
|
||||
dockerImageReference := fmt.Sprintf("%s/%s/%s@%s", d.client.dockerRegistryHostPart(), d.client.namespace, d.client.stream, manifestDigest)
|
||||
ism := imageStreamMapping{
|
||||
typeMeta: typeMeta{
|
||||
Kind: "ImageStreamMapping",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
objectMeta: objectMeta{
|
||||
Namespace: d.client.namespace,
|
||||
			Name: d.client.stream,
		},
		Image: image{
			objectMeta: objectMeta{
				Name: manifestDigest,
			},
			DockerImageReference: dockerImageReference,
			DockerImageManifest:  string(manifest),
		},
		Tag: d.client.tag,
	}
	body, err := json.Marshal(ism)
	if err != nil {
		return err
	}

	// FIXME: validate components per validation.IsValidPathSegmentName?
	path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreammappings", d.client.namespace)
	body, err = d.client.doRequest("POST", path, body)
	if err != nil {
		return err
	}

	return d.docker.PutManifest(manifest)
}

func (d *openshiftImageDestination) PutLayer(digest string, stream io.Reader) error {
	return d.docker.PutLayer(digest, stream)
}

func (d *openshiftImageDestination) PutSignatures(signatures [][]byte) error {
	if len(signatures) != 0 {
		return fmt.Errorf("Pushing signatures to an Atomic Registry is not supported")
	}
	return nil
}

// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies.
type imageStream struct {
	Status imageStreamStatus `json:"status,omitempty"`
}
type imageStreamStatus struct {
	DockerImageRepository string              `json:"dockerImageRepository"`
	Tags                  []namedTagEventList `json:"tags,omitempty"`
}
type namedTagEventList struct {
	Tag   string     `json:"tag"`
	Items []tagEvent `json:"items"`
}
type tagEvent struct {
	DockerImageReference string `json:"dockerImageReference"`
	Image                string `json:"image"`
}
type imageStreamImage struct {
	Image image `json:"image"`
}
type image struct {
	objectMeta           `json:"metadata,omitempty"`
	DockerImageReference string `json:"dockerImageReference,omitempty"`
	// DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty"`
	DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"`
	DockerImageManifest        string `json:"dockerImageManifest,omitempty"`
	// DockerImageLayers []ImageLayer `json:"dockerImageLayers"`
}
type imageStreamMapping struct {
	typeMeta   `json:",inline"`
	objectMeta `json:"metadata,omitempty"`
	Image      image  `json:"image"`
	Tag        string `json:"tag"`
}
type typeMeta struct {
	Kind       string `json:"kind,omitempty"`
	APIVersion string `json:"apiVersion,omitempty"`
}
type objectMeta struct {
	Name                       string            `json:"name,omitempty"`
	GenerateName               string            `json:"generateName,omitempty"`
	Namespace                  string            `json:"namespace,omitempty"`
	SelfLink                   string            `json:"selfLink,omitempty"`
	ResourceVersion            string            `json:"resourceVersion,omitempty"`
	Generation                 int64             `json:"generation,omitempty"`
	DeletionGracePeriodSeconds *int64            `json:"deletionGracePeriodSeconds,omitempty"`
	Labels                     map[string]string `json:"labels,omitempty"`
	Annotations                map[string]string `json:"annotations,omitempty"`
}

// A subset of k8s.io/kubernetes/pkg/api/unversioned/Status
type status struct {
	Status  string `json:"status,omitempty"`
	Message string `json:"message,omitempty"`
	// Reason StatusReason `json:"reason,omitempty"`
	// Details *StatusDetails `json:"details,omitempty"`
	Code int32 `json:"code,omitempty"`
}
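For orientation, the body POSTed to the imagestreammappings endpoint is the imageStreamMapping above, serialized with these JSON tags. A sketch with purely illustrative values; whether kind/apiVersion are populated is not visible in this fragment, so they are omitted here:

```json
{
    "metadata": {
        "name": "mystream"
    },
    "image": {
        "metadata": {
            "name": "sha256:…"
        },
        "dockerImageReference": "registry.example.com/myproject/mystream@sha256:…",
        "dockerImageManifest": "…the raw manifest JSON, as a string…"
    },
    "tag": "latest"
}
```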
@@ -1,210 +0,0 @@
package reference // COPY WITH EDITS FROM DOCKER/DOCKER

import (
	"errors"
	"fmt"
	"regexp"
	"strings"

	"github.com/docker/distribution/digest"
	distreference "github.com/docker/distribution/reference"
)

const (
	// DefaultTag defines the default tag used when performing images related actions and no tag or digest is specified
	DefaultTag = "latest"
	// DefaultHostname is the default built-in hostname
	DefaultHostname = "docker.io"
	// LegacyDefaultHostname is automatically converted to DefaultHostname
	LegacyDefaultHostname = "index.docker.io"
	// DefaultRepoPrefix is the prefix used for default repositories in default host
	DefaultRepoPrefix = "library/"
)

// Named is an object with a full name
type Named interface {
	// Name returns normalized repository name, like "ubuntu".
	Name() string
	// String returns full reference, like "ubuntu@sha256:abcdef..."
	String() string
	// FullName returns full repository name with hostname, like "docker.io/library/ubuntu"
	FullName() string
	// Hostname returns hostname for the reference, like "docker.io"
	Hostname() string
	// RemoteName returns the repository component of the full name, like "library/ubuntu"
	RemoteName() string
}

// NamedTagged is an object including a name and tag.
type NamedTagged interface {
	Named
	Tag() string
}

// Canonical reference is an object with a fully unique
// name including a name with hostname and digest
type Canonical interface {
	Named
	Digest() digest.Digest
}

// ParseNamed parses s and returns a syntactically valid reference implementing
// the Named interface. The reference must have a name, otherwise an error is
// returned.
// If an error was encountered it is returned, along with a nil Reference.
func ParseNamed(s string) (Named, error) {
	named, err := distreference.ParseNamed(s)
	if err != nil {
		return nil, fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", s)
	}
	r, err := WithName(named.Name())
	if err != nil {
		return nil, err
	}
	if canonical, isCanonical := named.(distreference.Canonical); isCanonical {
		return WithDigest(r, canonical.Digest())
	}
	if tagged, isTagged := named.(distreference.NamedTagged); isTagged {
		return WithTag(r, tagged.Tag())
	}
	return r, nil
}

// WithName returns a named object representing the given string. If the input
// is invalid ErrReferenceInvalidFormat will be returned.
func WithName(name string) (Named, error) {
	name, err := normalize(name)
	if err != nil {
		return nil, err
	}
	if err := validateName(name); err != nil {
		return nil, err
	}
	r, err := distreference.WithName(name)
	if err != nil {
		return nil, err
	}
	return &namedRef{r}, nil
}

// WithTag combines the name from "name" and the tag from "tag" to form a
// reference incorporating both the name and the tag.
func WithTag(name Named, tag string) (NamedTagged, error) {
	r, err := distreference.WithTag(name, tag)
	if err != nil {
		return nil, err
	}
	return &taggedRef{namedRef{r}}, nil
}

// WithDigest combines the name from "name" and the digest from "digest" to form
// a reference incorporating both the name and the digest.
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
	r, err := distreference.WithDigest(name, digest)
	if err != nil {
		return nil, err
	}
	return &canonicalRef{namedRef{r}}, nil
}

type namedRef struct {
	distreference.Named
}
type taggedRef struct {
	namedRef
}
type canonicalRef struct {
	namedRef
}

func (r *namedRef) FullName() string {
	hostname, remoteName := splitHostname(r.Name())
	return hostname + "/" + remoteName
}
func (r *namedRef) Hostname() string {
	hostname, _ := splitHostname(r.Name())
	return hostname
}
func (r *namedRef) RemoteName() string {
	_, remoteName := splitHostname(r.Name())
	return remoteName
}
func (r *taggedRef) Tag() string {
	return r.namedRef.Named.(distreference.NamedTagged).Tag()
}
func (r *canonicalRef) Digest() digest.Digest {
	return r.namedRef.Named.(distreference.Canonical).Digest()
}

// WithDefaultTag adds a default tag to a reference if it only has a repo name.
func WithDefaultTag(ref Named) Named {
	if IsNameOnly(ref) {
		ref, _ = WithTag(ref, DefaultTag)
	}
	return ref
}

// IsNameOnly returns true if reference only contains a repo name.
func IsNameOnly(ref Named) bool {
	if _, ok := ref.(NamedTagged); ok {
		return false
	}
	if _, ok := ref.(Canonical); ok {
		return false
	}
	return true
}

// splitHostname splits a repository name to hostname and remotename string.
// If no valid hostname is found, the default hostname is used. Repository name
// needs to be already validated before.
func splitHostname(name string) (hostname, remoteName string) {
	i := strings.IndexRune(name, '/')
	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
		hostname, remoteName = DefaultHostname, name
	} else {
		hostname, remoteName = name[:i], name[i+1:]
	}
	if hostname == LegacyDefaultHostname {
		hostname = DefaultHostname
	}
	if hostname == DefaultHostname && !strings.ContainsRune(remoteName, '/') {
		remoteName = DefaultRepoPrefix + remoteName
	}
	return
}

// normalize returns a repository name in its normalized form, meaning it
// will not contain default hostname nor library/ prefix for official images.
func normalize(name string) (string, error) {
	host, remoteName := splitHostname(name)
	if strings.ToLower(remoteName) != remoteName {
		return "", errors.New("invalid reference format: repository name must be lowercase")
	}
	if host == DefaultHostname {
		if strings.HasPrefix(remoteName, DefaultRepoPrefix) {
			return strings.TrimPrefix(remoteName, DefaultRepoPrefix), nil
		}
		return remoteName, nil
	}
	return name, nil
}

// EDIT FROM DOCKER/DOCKER TO NOT IMPORT IMAGE.V1

func validateName(name string) error {
	if err := ValidateIDV1(name); err == nil {
		return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name)
	}
	return nil
}

var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)

// ValidateIDV1 checks whether an ID string is a valid image ID.
func ValidateIDV1(id string) error {
	if ok := validHex.MatchString(id); !ok {
		return fmt.Errorf("image ID %q is invalid", id)
	}
	return nil
}
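A minimal usage sketch of the helpers above; the import path is an assumption (this is a vendored copy), and the commented results follow directly from splitHostname and normalize as defined:

```go
package main

import (
	"fmt"

	// Assumed import path for the vendored package above.
	"github.com/projectatomic/skopeo/reference"
)

func main() {
	// "busybox" carries no hostname, so the default hostname and the
	// library/ prefix are implied but kept out of the normalized name.
	ref, err := reference.ParseNamed("busybox")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.Name())       // busybox
	fmt.Println(ref.FullName())   // docker.io/library/busybox
	fmt.Println(ref.Hostname())   // docker.io
	fmt.Println(ref.RemoteName()) // library/busybox

	// WithDefaultTag appends ":latest" only when no tag or digest is present.
	tagged := reference.WithDefaultTag(ref)
	fmt.Println(reference.IsNameOnly(tagged)) // false
}
```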
@@ -1,43 +0,0 @@
// Note: Consider the API unstable until the code supports at least three different image formats or transports.

package signature

import (
	"fmt"

	"github.com/projectatomic/skopeo/docker/utils"
)

// SignDockerManifest returns a signature for manifest as the specified dockerReference,
// using mech and keyIdentity.
func SignDockerManifest(manifest []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
	manifestDigest, err := utils.ManifestDigest(manifest)
	if err != nil {
		return nil, err
	}
	sig := privateSignature{
		Signature{
			DockerManifestDigest: manifestDigest,
			DockerReference:      dockerReference,
		},
	}
	return sig.sign(mech, keyIdentity)
}

// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
// using mech.
func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte,
	expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) {
	expectedManifestDigest, err := utils.ManifestDigest(unverifiedManifest)
	if err != nil {
		return nil, err
	}
	sig, err := verifyAndExtractSignature(mech, unverifiedSignature, expectedKeyIdentity, expectedDockerReference)
	if err != nil {
		return nil, err
	}
	if sig.DockerManifestDigest != expectedManifestDigest {
		return nil, InvalidSignatureError{msg: fmt.Sprintf("Docker manifest digest %s does not match %s", sig.DockerManifestDigest, expectedManifestDigest)}
	}
	return sig, nil
}
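Taken together, the two entry points form a sign-then-verify round trip. A minimal sketch, assuming the package is importable as shown; the import path, manifest file, reference, and fingerprint are placeholders:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/projectatomic/skopeo/signature" // assumed import path
)

func main() {
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		log.Fatal(err)
	}
	manifest, err := ioutil.ReadFile("manifest.json") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	dockerReference := "example.com/myrepo/myimage"           // placeholder
	fingerprint := "0123456789ABCDEF0123456789ABCDEF01234567" // placeholder

	sig, err := signature.SignDockerManifest(manifest, dockerReference, mech, fingerprint)
	if err != nil {
		log.Fatal(err)
	}
	// Verification recomputes the manifest digest and checks the reference,
	// the signing key, and the digest against the expectations.
	verified, err := signature.VerifyDockerManifestSignature(sig, manifest, dockerReference, mech, fingerprint)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(verified.DockerReference, verified.DockerManifestDigest)
}
```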
@@ -1,79 +0,0 @@
package signature

import (
	"io/ioutil"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestSignDockerManifest(t *testing.T) {
	mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
	require.NoError(t, err)
	manifest, err := ioutil.ReadFile("fixtures/image.manifest.json")
	require.NoError(t, err)

	// Successful signing
	signature, err := SignDockerManifest(manifest, TestImageSignatureReference, mech, TestKeyFingerprint)
	require.NoError(t, err)

	verified, err := VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint)
	assert.NoError(t, err)
	assert.Equal(t, TestImageSignatureReference, verified.DockerReference)
	assert.Equal(t, TestImageManifestDigest, verified.DockerManifestDigest)

	// Error computing Docker manifest
	invalidManifest, err := ioutil.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json")
	require.NoError(t, err)
	_, err = SignDockerManifest(invalidManifest, TestImageSignatureReference, mech, TestKeyFingerprint)
	assert.Error(t, err)

	// Error creating blob to sign
	_, err = SignDockerManifest(manifest, "", mech, TestKeyFingerprint)
	assert.Error(t, err)

	// Error signing
	_, err = SignDockerManifest(manifest, TestImageSignatureReference, mech, "this fingerprint doesn't exist")
	assert.Error(t, err)
}

func TestVerifyDockerManifestSignature(t *testing.T) {
	mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
	require.NoError(t, err)
	manifest, err := ioutil.ReadFile("fixtures/image.manifest.json")
	require.NoError(t, err)
	signature, err := ioutil.ReadFile("fixtures/image.signature")
	require.NoError(t, err)

	// Successful verification
	sig, err := VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint)
	require.NoError(t, err)
	assert.Equal(t, TestImageSignatureReference, sig.DockerReference)
	assert.Equal(t, TestImageManifestDigest, sig.DockerManifestDigest)

	// For extra paranoia, test that we return nil data on error.

	// Error computing Docker manifest
	invalidManifest, err := ioutil.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json")
	require.NoError(t, err)
	sig, err = VerifyDockerManifestSignature(signature, invalidManifest, TestImageSignatureReference, mech, TestKeyFingerprint)
	assert.Error(t, err)
	assert.Nil(t, sig)

	// Error verifying signature
	corruptSignature, err := ioutil.ReadFile("fixtures/corrupt.signature")
	require.NoError(t, err)
	sig, err = VerifyDockerManifestSignature(corruptSignature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint)
	assert.Error(t, err)
	assert.Nil(t, sig)

	// Key fingerprint mismatch
	sig, err = VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, "unexpected fingerprint")
	assert.Error(t, err)
	assert.Nil(t, sig)

	// Docker manifest digest mismatch
	sig, err = VerifyDockerManifestSignature(signature, []byte("unexpected manifest"), TestImageSignatureReference, mech, TestKeyFingerprint)
	assert.Error(t, err)
	assert.Nil(t, sig)
}
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,84 +0,0 @@
{
    "default": [
        {
            "type": "reject"
        }
    ],
    "specific": {
        "example.com/playground": [
            {
                "type": "insecureAcceptAnything"
            }
        ],
        "example.com/production": [
            {
                "type": "signedBy",
                "keyType": "GPGKeys",
                "keyPath": "/keys/employee-gpg-keyring"
            }
        ],
        "example.com/hardened": [
            {
                "type": "signedBy",
                "keyType": "GPGKeys",
                "keyPath": "/keys/employee-gpg-keyring",
                "signedIdentity": {
                    "type": "matchRepository"
                }
            },
            {
                "type": "signedBy",
                "keyType": "signedByGPGKeys",
                "keyPath": "/keys/public-key-signing-gpg-keyring",
                "signedIdentity": {
                    "type": "matchExact"
                }
            },
            {
                "type": "signedBaseLayer",
                "baseLayerIdentity": {
                    "type": "exactRepository",
                    "dockerRepository": "registry.access.redhat.com/rhel7/rhel"
                }
            }
        ],
        "example.com/hardened-x509": [
            {
                "type": "signedBy",
                "keyType": "X509Certificates",
                "keyPath": "/keys/employee-cert-file",
                "signedIdentity": {
                    "type": "matchRepository"
                }
            },
            {
                "type": "signedBy",
                "keyType": "signedByX509CAs",
                "keyPath": "/keys/public-key-signing-ca-file"
            }
        ],
        "registry.access.redhat.com": [
            {
                "type": "signedBy",
                "keyType": "signedByGPGKeys",
                "keyPath": "/keys/RH-key-signing-key-gpg-keyring"
            }
        ],
        "bogus/key-data-example": [
            {
                "type": "signedBy",
                "keyType": "signedByGPGKeys",
                "keyData": "bm9uc2Vuc2U="
            }
        ],
        "bogus/signed-identity-example": [
            {
                "type": "signedBaseLayer",
                "baseLayerIdentity": {
                    "type": "exactReference",
                    "dockerReference": "registry.access.redhat.com/rhel7/rhel:latest"
                }
            }
        ]
    }
}
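The policy file is plain JSON, so its shape can be inspected generically; this sketch assumes only the structure visible above (a default list of requirements plus a specific map keyed by scope), with the file path as a placeholder:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
)

type policyFile struct {
	Default  []map[string]interface{}            `json:"default"`
	Specific map[string][]map[string]interface{} `json:"specific"`
}

func main() {
	data, err := ioutil.ReadFile("policy.json") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	var p policyFile
	if err := json.Unmarshal(data, &p); err != nil {
		log.Fatal(err)
	}
	// Fall back to the default requirements when a scope has no specific entry.
	reqs, ok := p.Specific["example.com/production"]
	if !ok {
		reqs = p.Default
	}
	for _, r := range reqs {
		fmt.Println(r["type"])
	}
}
```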
@@ -1,19 +0,0 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1

mI0EVurzqQEEAL3qkFq4K2URtSWVDYnQUNA9HdM9sqS2eAWfqUFMrkD5f+oN+LBL
tPyaE5GNLA0vXY7nHAM2TeM8ijZ/eMP17Raj64JL8GhCymL3wn2jNvb9XaF0R0s6
H0IaRPPu45A3SnxLwm4Orc/9Z7/UxtYjKSg9xOaTiVPzJgaf5Vm4J4ApABEBAAG0
EnNrb3BlbyB0ZXN0aW5nIGtleYi4BBMBAgAiBQJW6vOpAhsDBgsJCAcDAgYVCAIJ
CgsEFgIDAQIeAQIXgAAKCRDbcvIYi7RsyBbOBACgJFiKDlQ1UyvsNmGqJ7D0OpbS
1OppJlradKgZXyfahFswhFI+7ZREvELLHbinq3dBy5cLXRWzQKdJZNHknSN5Tjf2
0ipVBQuqpcBo+dnKiG4zH6fhTri7yeTZksIDfsqlI6FXDOdKLUSnahagEBn4yU+x
jHPvZk5SuuZv56A45biNBFbq86kBBADIC/9CsAlOmRALuYUmkhcqEjuFwn3wKz2d
IBjzgvro7zcVNNCgxQfMEjcUsvEh5cx13G3QQHcwOKy3M6Bv6VMhfZjd+1P1el4P
0fJS8GFmhWRBknMN8jFsgyohQeouQ798RFFv94KszfStNnr/ae8oao5URmoUXSCa
/MdUxn0YKwARAQABiJ8EGAECAAkFAlbq86kCGwwACgkQ23LyGIu0bMjUywQAq0dn
lUpDNSoLTcpNWuVvHQ7c/qmnE4TyiSLiRiAywdEWA6gMiyhUUucuGsEhMFP1WX1k
UNwArZ6UG7BDOUsvngP7jKGNqyUOQrq1s/r8D+0MrJGOWErGLlfttO2WeoijECkI
5qm8cXzAra3Xf/Z3VjxYTKSnNu37LtZkakdTdYE=
=tJAt
-----END PGP PUBLIC KEY BLOCK-----
Binary file not shown.
Binary file not shown.
@@ -1,11 +0,0 @@
{
    "schemaVersion": 1,
    "name": "mitr/buxybox",
    "tag": "latest",
    "architecture": "amd64",
    "fsLayers": [
    ],
    "history": [
    ],
    "signatures": 1
}
@@ -1,10 +0,0 @@
package signature

const (
	// TestImageManifestDigest is the Docker manifest digest of "image.manifest.json"
	TestImageManifestDigest = "sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55"
	// TestImageSignatureReference is the Docker image reference signed in "image.signature"
	TestImageSignatureReference = "testing/manifest"
	// TestKeyFingerprint is the fingerprint of the private key in this directory.
	TestKeyFingerprint = "1D8230F6CDB6A06716E414C1DB72F2188BB46CC8"
)
@@ -1,149 +0,0 @@
package signature

import (
	"encoding/json"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

type mSI map[string]interface{} // To minimize typing the long name

// A short-hand way to get a JSON object field value or panic. No error handling done, we know
// what we are working with, a panic in a test is good enough, and fitting test cases on a single line
// is a priority.
func x(m mSI, fields ...string) mSI {
	for _, field := range fields {
		// Not .(mSI) because type assertion of an unnamed type to a named type always fails (the types
		// are not "identical"), but the assignment is fine because they are "assignable".
		m = m[field].(map[string]interface{})
	}
	return m
}

func TestValidateExactMapKeys(t *testing.T) {
	// Empty map and keys
	err := validateExactMapKeys(mSI{})
	assert.NoError(t, err)

	// Success
	err = validateExactMapKeys(mSI{"a": nil, "b": 1}, "b", "a")
	assert.NoError(t, err)

	// Extra map keys
	err = validateExactMapKeys(mSI{"a": nil, "b": 1}, "a")
	assert.Error(t, err)

	// Extra expected keys
	err = validateExactMapKeys(mSI{"a": 1}, "b", "a")
	assert.Error(t, err)

	// Unexpected key values
	err = validateExactMapKeys(mSI{"a": 1}, "b")
	assert.Error(t, err)
}

func TestMapField(t *testing.T) {
	// Field not found
	_, err := mapField(mSI{"a": mSI{}}, "b")
	assert.Error(t, err)

	// Field has a wrong type
	_, err = mapField(mSI{"a": 1}, "a")
	assert.Error(t, err)

	// Success
	// FIXME? We can't use mSI as the type of child, that type apparently can't be converted to the raw map type.
	child := map[string]interface{}{"b": mSI{}}
	m, err := mapField(mSI{"a": child, "b": nil}, "a")
	require.NoError(t, err)
	assert.Equal(t, child, m)
}

func TestStringField(t *testing.T) {
	// Field not found
	_, err := stringField(mSI{"a": "x"}, "b")
	assert.Error(t, err)

	// Field has a wrong type
	_, err = stringField(mSI{"a": 1}, "a")
	assert.Error(t, err)

	// Success
	s, err := stringField(mSI{"a": "x", "b": nil}, "a")
	require.NoError(t, err)
	assert.Equal(t, "x", s)
}

// implementsUnmarshalJSON is a minimalistic type used to detect that
// paranoidUnmarshalJSONObject uses the json.Unmarshaler interface of resolved
// pointers.
type implementsUnmarshalJSON bool

// Compile-time check that implementsUnmarshalJSON implements json.Unmarshaler.
var _ json.Unmarshaler = (*implementsUnmarshalJSON)(nil)

func (dest *implementsUnmarshalJSON) UnmarshalJSON(data []byte) error {
	_ = data     // We don't care, not really.
	*dest = true // Mark handler as called
	return nil
}

func TestParanoidUnmarshalJSONObject(t *testing.T) {
	type testStruct struct {
		A string
		B int
	}
	ts := testStruct{}
	var unmarshalJSONCalled implementsUnmarshalJSON
	tsResolver := func(key string) interface{} {
		switch key {
		case "a":
			return &ts.A
		case "b":
			return &ts.B
		case "implementsUnmarshalJSON":
			return &unmarshalJSONCalled
		default:
			return nil
		}
	}

	// Empty object
	ts = testStruct{}
	err := paranoidUnmarshalJSONObject([]byte(`{}`), tsResolver)
	require.NoError(t, err)
	assert.Equal(t, testStruct{}, ts)

	// Success
	ts = testStruct{}
	err = paranoidUnmarshalJSONObject([]byte(`{"a":"x", "b":2}`), tsResolver)
	require.NoError(t, err)
	assert.Equal(t, testStruct{A: "x", B: 2}, ts)

	// json.Unmarshaler is used for decoding values
	ts = testStruct{}
	unmarshalJSONCalled = implementsUnmarshalJSON(false)
	err = paranoidUnmarshalJSONObject([]byte(`{"implementsUnmarshalJSON":true}`), tsResolver)
	require.NoError(t, err)
	assert.Equal(t, unmarshalJSONCalled, implementsUnmarshalJSON(true))

	// Various kinds of invalid input
	for _, input := range []string{
		``,                       // Empty input
		`&`,                      // Entirely invalid JSON
		`1`,                      // Not an object
		`{&}`,                    // Invalid key JSON
		`{1:1}`,                  // Key not a string
		`{"b":1, "b":1}`,         // Duplicate key
		`{"thisdoesnotexist":1}`, // Key rejected by resolver
		`{"a":&}`,                // Invalid value JSON
		`{"a":1}`,                // Type mismatch
		`{"a":"value"}{}`,        // Extra data after object
	} {
		ts = testStruct{}
		err := paranoidUnmarshalJSONObject([]byte(input), tsResolver)
		assert.Error(t, err)
	}
}
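The paranoidUnmarshalJSONObject implementation itself is not shown in this diff, so the following is only a sketch of the resolver-driven strict decoding the tests exercise: exactly one JSON object, no duplicate or unknown keys, no trailing data, and values decoded through the pointer the resolver returns (which honors json.Unmarshaler). Names here are assumptions, not the real implementation:

```go
package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
)

// strictUnmarshalObject decodes data, which must be exactly one JSON object,
// rejecting duplicate keys, keys the resolver does not recognize, and trailing data.
func strictUnmarshalObject(data []byte, resolver func(key string) interface{}) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	tok, err := dec.Token()
	if err != nil {
		return err
	}
	if tok != json.Delim('{') {
		return errors.New("not a JSON object")
	}
	seen := map[string]bool{}
	for dec.More() {
		keyTok, err := dec.Token()
		if err != nil {
			return err
		}
		key, ok := keyTok.(string)
		if !ok {
			return errors.New("object key is not a string")
		}
		if seen[key] {
			return fmt.Errorf("duplicate key %q", key)
		}
		seen[key] = true
		dest := resolver(key)
		if dest == nil {
			return fmt.Errorf("unexpected key %q", key)
		}
		// Decode invokes dest's json.Unmarshaler if it implements one.
		if err := dec.Decode(dest); err != nil {
			return err
		}
	}
	if _, err := dec.Token(); err != nil { // consume the closing '}'
		return err
	}
	if _, err := dec.Token(); err == nil { // any further token means trailing data
		return errors.New("unexpected data after JSON object")
	}
	return nil
}

func main() {
	var a string
	err := strictUnmarshalObject([]byte(`{"a":"x"}`), func(key string) interface{} {
		if key == "a" {
			return &a
		}
		return nil
	})
	fmt.Println(a, err) // x <nil>
}
```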
@@ -1,121 +0,0 @@
// Note: Consider the API unstable until the code supports at least three different image formats or transports.

package signature

import (
	"bytes"
	"fmt"

	"github.com/mtrmac/gpgme"
)

// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
// eliminate ambiguities, support CA signatures and perhaps other key properties)
type SigningMechanism interface {
	// ImportKeysFromBytes imports public keys from the supplied blob and returns their identities.
	// The blob is assumed to have an appropriate format (the caller is expected to know which one).
	// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism).
	ImportKeysFromBytes(blob []byte) ([]string, error)
	// Sign creates a (non-detached) signature of input using keyIdentity
	Sign(input []byte, keyIdentity string) ([]byte, error)
	// Verify parses unverifiedSignature and returns the content and the signer's identity
	Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error)
}

// A GPG/OpenPGP signing mechanism.
type gpgSigningMechanism struct {
	ctx *gpgme.Context
}

// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism.
func NewGPGSigningMechanism() (SigningMechanism, error) {
	return newGPGSigningMechanismInDirectory("")
}

// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
	ctx, err := gpgme.New()
	if err != nil {
		return nil, err
	}
	if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil {
		return nil, err
	}
	if optionalDir != "" {
		err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir)
		if err != nil {
			return nil, err
		}
	}
	ctx.SetArmor(false)
	ctx.SetTextMode(false)
	return gpgSigningMechanism{ctx: ctx}, nil
}

// ImportKeysFromBytes implements SigningMechanism.ImportKeysFromBytes
func (m gpgSigningMechanism) ImportKeysFromBytes(blob []byte) ([]string, error) {
	inputData, err := gpgme.NewDataBytes(blob)
	if err != nil {
		return nil, err
	}
	res, err := m.ctx.Import(inputData)
	if err != nil {
		return nil, err
	}
	keyIdentities := []string{}
	for _, i := range res.Imports {
		if i.Result == nil {
			keyIdentities = append(keyIdentities, i.Fingerprint)
		}
	}
	return keyIdentities, nil
}

// Sign implements SigningMechanism.Sign
func (m gpgSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
	key, err := m.ctx.GetKey(keyIdentity, true)
	if err != nil {
		return nil, err
	}
	inputData, err := gpgme.NewDataBytes(input)
	if err != nil {
		return nil, err
	}
	var sigBuffer bytes.Buffer
	sigData, err := gpgme.NewDataWriter(&sigBuffer)
	if err != nil {
		return nil, err
	}
	if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
		return nil, err
	}
	return sigBuffer.Bytes(), nil
}

// Verify implements SigningMechanism.Verify
func (m gpgSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
	signedBuffer := bytes.Buffer{}
	signedData, err := gpgme.NewDataWriter(&signedBuffer)
	if err != nil {
		return nil, "", err
	}
	unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature)
	if err != nil {
		return nil, "", err
	}
	_, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData)
	if err != nil {
		return nil, "", err
	}
	if len(sigs) != 1 {
		return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))}
	}
	sig := sigs[0]
	// This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves
	if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage {
		// FIXME: Better error reporting eventually
		return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)}
	}
	return signedBuffer.Bytes(), sig.Fingerprint, nil
}
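At the blob level the mechanism is a simple sign/verify pipeline. A minimal sketch, assuming the package import path and a secret key already present in the default GPG home; the fingerprint is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/projectatomic/skopeo/signature" // assumed import path
)

func main() {
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder: fingerprint of a secret key available in the default GPG home.
	const keyFingerprint = "0123456789ABCDEF0123456789ABCDEF01234567"

	// The signature is non-detached, so Verify returns the embedded payload.
	sig, err := mech.Sign([]byte("payload"), keyFingerprint)
	if err != nil {
		log.Fatal(err)
	}
	content, signerFingerprint, err := mech.Verify(sig)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s signed %q\n", signerFingerprint, content)
}
```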
@@ -1,142 +0,0 @@
package signature

import (
	"bytes"
	"io/ioutil"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

const (
	testGPGHomeDirectory = "./fixtures"
)

func TestNewGPGSigningMechanism(t *testing.T) {
	// A dumb test just for code coverage. We test more with newGPGSigningMechanismInDirectory().
	_, err := NewGPGSigningMechanism()
	assert.NoError(t, err)
}

func TestNewGPGSigningMechanismInDirectory(t *testing.T) {
	// A dumb test just for code coverage.
	_, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
	assert.NoError(t, err)
	// The various GPG failure cases are not obviously easy to reach.
}

func TestGPGSigningMechanismImportKeysFromBytes(t *testing.T) {
	testDir, err := ioutil.TempDir("", "gpg-import-keys")
	require.NoError(t, err)
	defer os.RemoveAll(testDir)

	mech, err := newGPGSigningMechanismInDirectory(testDir)
	require.NoError(t, err)

	// Try validating a signature when the key is unknown.
	signature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature")
	require.NoError(t, err)
	content, signingFingerprint, err := mech.Verify(signature)
	require.Error(t, err)

	// Successful import
	keyBlob, err := ioutil.ReadFile("./fixtures/public-key.gpg")
	require.NoError(t, err)
	keyIdentities, err := mech.ImportKeysFromBytes(keyBlob)
	require.NoError(t, err)
	assert.Equal(t, []string{TestKeyFingerprint}, keyIdentities)

	// After import, the signature should validate.
	content, signingFingerprint, err = mech.Verify(signature)
	require.NoError(t, err)
	assert.Equal(t, []byte("This is not JSON\n"), content)
	assert.Equal(t, TestKeyFingerprint, signingFingerprint)

	// Two keys: just concatenate the valid input twice.
	keyIdentities, err = mech.ImportKeysFromBytes(bytes.Join([][]byte{keyBlob, keyBlob}, nil))
	require.NoError(t, err)
	assert.Equal(t, []string{TestKeyFingerprint, TestKeyFingerprint}, keyIdentities)

	// Invalid input: This is accepted anyway by GPG, just returns no keys.
	keyIdentities, err = mech.ImportKeysFromBytes([]byte("This is invalid"))
	require.NoError(t, err)
	assert.Equal(t, []string{}, keyIdentities)
	// The various GPG/GPGME failure cases are not obviously easy to reach.
}

func TestGPGSigningMechanismSign(t *testing.T) {
	mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
	require.NoError(t, err)

	// Successful signing
	content := []byte("content")
	signature, err := mech.Sign(content, TestKeyFingerprint)
	require.NoError(t, err)

	signedContent, signingFingerprint, err := mech.Verify(signature)
	require.NoError(t, err)
	assert.EqualValues(t, content, signedContent)
	assert.Equal(t, TestKeyFingerprint, signingFingerprint)

	// Error signing
	_, err = mech.Sign(content, "this fingerprint doesn't exist")
	assert.Error(t, err)
	// The various GPG/GPGME failure cases are not obviously easy to reach.
}

func assertSigningError(t *testing.T, content []byte, fingerprint string, err error) {
	assert.Error(t, err)
	assert.Nil(t, content)
	assert.Empty(t, fingerprint)
}

func TestGPGSigningMechanismVerify(t *testing.T) {
	mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
	require.NoError(t, err)

	// Successful verification
	signature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature")
	require.NoError(t, err)
	content, signingFingerprint, err := mech.Verify(signature)
	require.NoError(t, err)
	assert.Equal(t, []byte("This is not JSON\n"), content)
	assert.Equal(t, TestKeyFingerprint, signingFingerprint)

	// For extra paranoia, test that we return nil data on error.

	// Completely invalid signature.
	content, signingFingerprint, err = mech.Verify([]byte{})
	assertSigningError(t, content, signingFingerprint, err)

	content, signingFingerprint, err = mech.Verify([]byte("invalid signature"))
	assertSigningError(t, content, signingFingerprint, err)

	// Literal packet, not a signature
	signature, err = ioutil.ReadFile("./fixtures/unsigned-literal.signature")
	require.NoError(t, err)
	content, signingFingerprint, err = mech.Verify(signature)
	assertSigningError(t, content, signingFingerprint, err)

	// Encrypted data, not a signature.
	signature, err = ioutil.ReadFile("./fixtures/unsigned-encrypted.signature")
	require.NoError(t, err)
	content, signingFingerprint, err = mech.Verify(signature)
	assertSigningError(t, content, signingFingerprint, err)

	// FIXME? Is there a way to create a multi-signature so that gpgme_op_verify returns multiple signatures?

	// Expired signature
	signature, err = ioutil.ReadFile("./fixtures/expired.signature")
	require.NoError(t, err)
	content, signingFingerprint, err = mech.Verify(signature)
	assertSigningError(t, content, signingFingerprint, err)

	// Corrupt signature
	signature, err = ioutil.ReadFile("./fixtures/corrupt.signature")
	require.NoError(t, err)
	content, signingFingerprint, err = mech.Verify(signature)
	assertSigningError(t, content, signingFingerprint, err)
	// The various GPG/GPGME failure cases are not obviously easy to reach.
}
File diff suppressed because it is too large
@@ -1,181 +0,0 @@
// Note: Consider the API unstable until the code supports at least three different image formats or transports.

package signature

import (
	"encoding/json"
	"errors"
	"fmt"
	"time"

	"github.com/projectatomic/skopeo/version"
)

const (
	signatureType      = "atomic container signature"
	signatureCreatorID = "atomic " + version.Version
)

// InvalidSignatureError is returned when parsing an invalid signature.
type InvalidSignatureError struct {
	msg string
}

func (err InvalidSignatureError) Error() string {
	return err.msg
}

// Signature is a parsed content of a signature.
type Signature struct {
	DockerManifestDigest string // FIXME: more precise type?
	DockerReference      string // FIXME: more precise type?
}

// Wrap signature to add to it some methods which we don't want to make public.
type privateSignature struct {
	Signature
}

// Compile-time check that privateSignature implements json.Marshaler
var _ json.Marshaler = (*privateSignature)(nil)

// MarshalJSON implements the json.Marshaler interface.
func (s privateSignature) MarshalJSON() ([]byte, error) {
	return s.marshalJSONWithVariables(time.Now().UTC().Unix(), signatureCreatorID)
}

// Implementation of MarshalJSON, with caller-chosen values of the variable items to help testing.
func (s privateSignature) marshalJSONWithVariables(timestamp int64, creatorID string) ([]byte, error) {
	if s.DockerManifestDigest == "" || s.DockerReference == "" {
		return nil, errors.New("Unexpected empty signature content")
	}
	critical := map[string]interface{}{
		"type":     signatureType,
		"image":    map[string]string{"docker-manifest-digest": s.DockerManifestDigest},
		"identity": map[string]string{"docker-reference": s.DockerReference},
	}
	optional := map[string]interface{}{
		"creator":   creatorID,
		"timestamp": timestamp,
	}
	signature := map[string]interface{}{
		"critical": critical,
		"optional": optional,
	}
	return json.Marshal(signature)
}

// Compile-time check that privateSignature implements json.Unmarshaler
var _ json.Unmarshaler = (*privateSignature)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface
func (s *privateSignature) UnmarshalJSON(data []byte) error {
	err := s.strictUnmarshalJSON(data)
	if err != nil {
		if _, ok := err.(jsonFormatError); ok {
			err = InvalidSignatureError{msg: err.Error()}
		}
	}
	return err
}

// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type.
// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError conversion in a single place, the caller.
func (s *privateSignature) strictUnmarshalJSON(data []byte) error {
	var untyped interface{}
	if err := json.Unmarshal(data, &untyped); err != nil {
		return err
	}
	o, ok := untyped.(map[string]interface{})
	if !ok {
		return InvalidSignatureError{msg: "Invalid signature format"}
	}
	if err := validateExactMapKeys(o, "critical", "optional"); err != nil {
		return err
	}

	c, err := mapField(o, "critical")
	if err != nil {
		return err
	}
	if err := validateExactMapKeys(c, "type", "image", "identity"); err != nil {
		return err
	}

	optional, err := mapField(o, "optional")
	if err != nil {
		return err
	}
	_ = optional // We don't use anything from here for now.

	t, err := stringField(c, "type")
	if err != nil {
		return err
	}
	if t != signatureType {
		return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
	}

	image, err := mapField(c, "image")
	if err != nil {
		return err
	}
	if err := validateExactMapKeys(image, "docker-manifest-digest"); err != nil {
		return err
	}
	digest, err := stringField(image, "docker-manifest-digest")
	if err != nil {
		return err
	}
	s.DockerManifestDigest = digest

	identity, err := mapField(c, "identity")
	if err != nil {
		return err
	}
	if err := validateExactMapKeys(identity, "docker-reference"); err != nil {
		return err
	}
	reference, err := stringField(identity, "docker-reference")
	if err != nil {
		return err
	}
	s.DockerReference = reference

	return nil
}

// sign formats the signature and returns a blob signed using mech and keyIdentity
func (s privateSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
	json, err := json.Marshal(s)
	if err != nil {
		return nil, err
	}

	return mech.Sign(json, keyIdentity)
}

// verifyAndExtractSignature verifies that signature has been signed by expectedKeyIdentity
// using mech for expectedDockerReference, and returns it (without matching its contents to an image).
func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte,
	expectedKeyIdentity, expectedDockerReference string) (*Signature, error) {
	signed, keyIdentity, err := mech.Verify(unverifiedSignature)
	if err != nil {
		return nil, err
	}
	if keyIdentity != expectedKeyIdentity {
		return nil, InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)}
	}

	var unmatchedSignature privateSignature
	if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
		return nil, InvalidSignatureError{msg: err.Error()}
	}

	if unmatchedSignature.DockerReference != expectedDockerReference {
		return nil, InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s",
			unmatchedSignature.DockerReference, expectedDockerReference)}
	}
	signature := unmatchedSignature.Signature // Policy OK.
	return &signature, nil
}
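Pretty-printed, the blob that marshalJSONWithVariables(0, "CREATOR") produces for the digest "digest!@#" and reference "reference#@!" (the exact compact form is asserted in the tests below) is:

```json
{
    "critical": {
        "identity": {
            "docker-reference": "reference#@!"
        },
        "image": {
            "docker-manifest-digest": "digest!@#"
        },
        "type": "atomic container signature"
    },
    "optional": {
        "creator": "CREATOR",
        "timestamp": 0
    }
}
```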
@@ -1,206 +0,0 @@
package signature

import (
	"encoding/json"
	"io/ioutil"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestInvalidSignatureError(t *testing.T) {
	// A stupid test just to keep code coverage
	s := "test"
	err := InvalidSignatureError{msg: s}
	assert.Equal(t, s, err.Error())
}

func TestMarshalJSON(t *testing.T) {
	// Empty string values
	s := privateSignature{Signature{DockerManifestDigest: "", DockerReference: "_"}}
	_, err := s.MarshalJSON()
	assert.Error(t, err)
	s = privateSignature{Signature{DockerManifestDigest: "_", DockerReference: ""}}
	_, err = s.MarshalJSON()
	assert.Error(t, err)

	// Success
	s = privateSignature{Signature{DockerManifestDigest: "digest!@#", DockerReference: "reference#@!"}}
	marshaled, err := s.marshalJSONWithVariables(0, "CREATOR")
	require.NoError(t, err)
	assert.Equal(t, []byte("{\"critical\":{\"identity\":{\"docker-reference\":\"reference#@!\"},\"image\":{\"docker-manifest-digest\":\"digest!@#\"},\"type\":\"atomic container signature\"},\"optional\":{\"creator\":\"CREATOR\",\"timestamp\":0}}"),
		marshaled)

	// We can't test MarshalJSON directly because the timestamp will keep changing, so just test that
	// it doesn't fail. And call it through the JSON package for good measure.
	_, err = json.Marshal(s)
	assert.NoError(t, err)
}

// Return the result of modifying validJSON with modifyFn and unmarshaling it into *sig
func tryUnmarshalModifiedSignature(t *testing.T, sig *privateSignature, validJSON []byte, modifyFn func(mSI)) error {
	var tmp mSI
	err := json.Unmarshal(validJSON, &tmp)
	require.NoError(t, err)

	modifyFn(tmp)

	testJSON, err := json.Marshal(tmp)
	require.NoError(t, err)

	*sig = privateSignature{}
	return json.Unmarshal(testJSON, sig)
}

func TestUnmarshalJSON(t *testing.T) {
	var s privateSignature
	// Invalid input. Note that json.Unmarshal is guaranteed to validate input before calling our
	// UnmarshalJSON implementation; so test that first, then test our error handling for completeness.
	err := json.Unmarshal([]byte("&"), &s)
	assert.Error(t, err)
	err = s.UnmarshalJSON([]byte("&"))
	assert.Error(t, err)

	// Not an object
	err = json.Unmarshal([]byte("1"), &s)
	assert.Error(t, err)

	// Start with a valid JSON.
	validSig := privateSignature{
		Signature{
			DockerManifestDigest: "digest!@#",
			DockerReference:      "reference#@!",
		},
	}
	validJSON, err := validSig.MarshalJSON()
	require.NoError(t, err)

	// Success
	s = privateSignature{}
	err = json.Unmarshal(validJSON, &s)
	require.NoError(t, err)
	assert.Equal(t, validSig, s)

	// Various ways to corrupt the JSON
	breakFns := []func(mSI){
		// A top-level field is missing
		func(v mSI) { delete(v, "critical") },
		func(v mSI) { delete(v, "optional") },
		// Extra top-level sub-object
		func(v mSI) { v["unexpected"] = 1 },
		// "critical" not an object
		func(v mSI) { v["critical"] = 1 },
		// "optional" not an object
		func(v mSI) { v["optional"] = 1 },
		// A field of "critical" is missing
		func(v mSI) { delete(x(v, "critical"), "type") },
		func(v mSI) { delete(x(v, "critical"), "image") },
		func(v mSI) { delete(x(v, "critical"), "identity") },
		// Extra field of "critical"
		func(v mSI) { x(v, "critical")["unexpected"] = 1 },
		// Invalid "type"
		func(v mSI) { x(v, "critical")["type"] = 1 },
		func(v mSI) { x(v, "critical")["type"] = "unexpected" },
		// Invalid "image" object
		func(v mSI) { x(v, "critical")["image"] = 1 },
		func(v mSI) { delete(x(v, "critical", "image"), "docker-manifest-digest") },
		func(v mSI) { x(v, "critical", "image")["unexpected"] = 1 },
		// Invalid "docker-manifest-digest"
		func(v mSI) { x(v, "critical", "image")["docker-manifest-digest"] = 1 },
		// Invalid "identity" object
		func(v mSI) { x(v, "critical")["identity"] = 1 },
		func(v mSI) { delete(x(v, "critical", "identity"), "docker-reference") },
		func(v mSI) { x(v, "critical", "identity")["unexpected"] = 1 },
		// Invalid "docker-reference"
		func(v mSI) { x(v, "critical", "identity")["docker-reference"] = 1 },
	}
	for _, fn := range breakFns {
		err = tryUnmarshalModifiedSignature(t, &s, validJSON, fn)
		assert.Error(t, err)
	}

	// Modifications to "optional" are allowed and ignored
	allowedModificationFns := []func(mSI){
		// Add an optional field
		func(v mSI) { x(v, "optional")["unexpected"] = 1 },
		// Delete an optional field
		func(v mSI) { delete(x(v, "optional"), "creator") },
	}
	for _, fn := range allowedModificationFns {
		err = tryUnmarshalModifiedSignature(t, &s, validJSON, fn)
		require.NoError(t, err)
		assert.Equal(t, validSig, s)
	}
}

func TestSign(t *testing.T) {
	mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
	require.NoError(t, err)

	sig := privateSignature{
		Signature{
			DockerManifestDigest: "digest!@#",
			DockerReference:      "reference#@!",
		},
	}

	// Successful signing
	signature, err := sig.sign(mech, TestKeyFingerprint)
	require.NoError(t, err)

	verified, err := verifyAndExtractSignature(mech, signature, TestKeyFingerprint, sig.DockerReference)
	require.NoError(t, err)

	assert.Equal(t, sig.Signature, *verified)

	// Error creating blob to sign
	_, err = privateSignature{}.sign(mech, TestKeyFingerprint)
	assert.Error(t, err)

	// Error signing
	_, err = sig.sign(mech, "this fingerprint doesn't exist")
	assert.Error(t, err)
}

func TestVerifyAndExtractSignature(t *testing.T) {
	mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
	require.NoError(t, err)

	signature, err := ioutil.ReadFile("./fixtures/image.signature")
	require.NoError(t, err)

	// Successful verification
	sig, err := verifyAndExtractSignature(mech, signature, TestKeyFingerprint, TestImageSignatureReference)
	require.NoError(t, err)
	assert.Equal(t, TestImageSignatureReference, sig.DockerReference)
	assert.Equal(t, TestImageManifestDigest, sig.DockerManifestDigest)

	// For extra paranoia, test that we return a nil signature object on error.

	// Completely invalid signature.
	sig, err = verifyAndExtractSignature(mech, []byte{}, TestKeyFingerprint, TestImageSignatureReference)
	assert.Error(t, err)
	assert.Nil(t, sig)

	sig, err = verifyAndExtractSignature(mech, []byte("invalid signature"), TestKeyFingerprint, TestImageSignatureReference)
	assert.Error(t, err)
	assert.Nil(t, sig)

	// Valid signature of non-JSON
	invalidBlobSignature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature")
	require.NoError(t, err)
	sig, err = verifyAndExtractSignature(mech, invalidBlobSignature, TestKeyFingerprint, TestImageSignatureReference)
	assert.Error(t, err)
	assert.Nil(t, sig)

	// Valid signature with a wrong key
	sig, err = verifyAndExtractSignature(mech, signature, "unexpected fingerprint", TestImageSignatureReference)
	assert.Error(t, err)
	assert.Nil(t, sig)

	// Valid signature with a wrong image reference
	sig, err = verifyAndExtractSignature(mech, signature, TestKeyFingerprint, "unexpected docker reference")
	assert.Error(t, err)
	assert.Nil(t, sig)
}
@@ -1,85 +0,0 @@
package types

import (
	"fmt"
	"io"
	"time"
)

// Registry is a service providing repositories.
type Registry interface {
	Repositories() []Repository
	Repository(ref string) Repository
	Lookup(term string) []Image // docker registry v1 only AFAICT, v2 can be built hacking with Images()
}

// Repository is a set of images.
type Repository interface {
	Images() []Image
	Image(ref string) Image // ref == image name w/o registry part
}

// ImageSource is a service, possibly remote (= slow), to download components of a single image.
// This is primarily useful for copying images around; for examining their properties, Image (below)
// is usually more useful.
type ImageSource interface {
	// IntendedDockerReference returns the full, unambiguous, Docker reference for this image, _as specified by the user_
	// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
	// May be "" if unknown.
	IntendedDockerReference() string
	// GetManifest returns the image's manifest along with its MIME type. The empty string is returned if the MIME type is unknown. The slice parameter indicates the supported MIME types the manifest should be when getting it.
	// It may use a remote (= slow) service.
	GetManifest([]string) ([]byte, string, error)
	// Note: Calling GetLayer() may have ordering dependencies WRT other methods of this type. FIXME: How does this work with (docker save) on stdin?
	GetLayer(digest string) (io.ReadCloser, error)
	// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
	GetSignatures() ([][]byte, error)
}

// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
type ImageDestination interface {
	// CanonicalDockerReference returns the full, unambiguous, Docker reference for this image (even if the user referred to the image using some shorthand notation).
	CanonicalDockerReference() (string, error)
	// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
	PutManifest([]byte) error
	// Note: Calling PutLayer() and other methods may have ordering dependencies WRT other methods of this type. FIXME: Figure out and document.
	PutLayer(digest string, stream io.Reader) error
	PutSignatures(signatures [][]byte) error
}

// Image is the primary API for inspecting properties of images.
type Image interface {
	// ref to repository?
	// IntendedDockerReference returns the full, unambiguous, Docker reference for this image, _as specified by the user_
	// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
	// May be "" if unknown.
	IntendedDockerReference() string
	// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
	// FIXME? This should also return a MIME type if known, to differentiate between schema versions.
	Manifest() ([]byte, error)
	// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
	Signatures() ([][]byte, error)
	Layers(layers ...string) error // configure download directory? Call it DownloadLayers?
	Inspect() (*ImageInspectInfo, error)
	DockerTar() ([]byte, error) // ??? also, configure output directory
	// GetRepositoryTags lists all tags available in the repository. Note that this has no connection with the tag(s) used for this specific image, if any.
	// Eventually we should move this away from the generic Image interface, and move it into a Docker-specific case within the (skopeo inspect) command,
	// see https://github.com/projectatomic/skopeo/pull/58#discussion_r63411838 .
	GetRepositoryTags() ([]string, error)
}

// ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration.
type ImageInspectInfo struct {
	Name          string
	Tag           string
	Created       time.Time
	DockerVersion string
	Labels        map[string]string
	Architecture  string
	Os            string
	Layers        []string
}

func (m *ImageInspectInfo) String() string {
	return fmt.Sprintf("%s:%s", m.Name, m.Tag)
}
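These interfaces compose into the obvious copy pipeline. A minimal sketch under stated assumptions: the import path is hypothetical, src and dest are already constructed for concrete transports, and extracting layer digests from the manifest is left to the caller:

```go
package imagecopy

import (
	"github.com/projectatomic/skopeo/types" // assumed import path
)

// copyImage streams one image from src to dest using only the interfaces above.
func copyImage(src types.ImageSource, dest types.ImageDestination, layerDigests []string) error {
	manifest, _, err := src.GetManifest(nil) // nil: no MIME-type preference
	if err != nil {
		return err
	}
	for _, digest := range layerDigests {
		stream, err := src.GetLayer(digest)
		if err != nil {
			return err
		}
		err = dest.PutLayer(digest, stream)
		stream.Close()
		if err != nil {
			return err
		}
	}
	sigs, err := src.GetSignatures()
	if err != nil {
		return err
	}
	if err := dest.PutSignatures(sigs); err != nil {
		return err
	}
	// Upload the manifest last so the image becomes visible only once complete.
	return dest.PutManifest(manifest)
}
```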
vendor.conf (new file, 49 lines)
@@ -0,0 +1,49 @@
github.com/urfave/cli v1.17.0
github.com/containers/image master
github.com/opencontainers/go-digest master
gopkg.in/cheggaaa/pb.v1 ad4efe000aa550bb54918c06ebbadc0ff17687b9 https://github.com/cheggaaa/pb
github.com/containers/storage master
github.com/Sirupsen/logrus v0.10.0
github.com/go-check/check v1
github.com/stretchr/testify v1.1.3
github.com/davecgh/go-spew master
github.com/pmezard/go-difflib master
github.com/pkg/errors master
golang.org/x/crypto master
# docker deps from https://github.com/docker/docker/blob/v1.11.2/hack/vendor.sh
github.com/docker/docker v1.13.0
github.com/docker/go-connections 4ccf312bf1d35e5dbda654e57a9be4c3f3cd0366
github.com/vbatts/tar-split v0.10.1
github.com/gorilla/context 14f550f51a
github.com/gorilla/mux e444e69cbd
github.com/docker/go-units 8a7beacffa3009a9ac66bad506b18ffdd110cf97
golang.org/x/net master
# end docker deps
golang.org/x/text master
github.com/docker/distribution master
github.com/docker/libtrust master
github.com/opencontainers/runc master
github.com/opencontainers/image-spec v1.0.0
# -- start OCI image validation requirements.
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/image-tools v0.1.0
github.com/xeipuuv/gojsonschema master
github.com/xeipuuv/gojsonreference master
github.com/xeipuuv/gojsonpointer master
go4.org master https://github.com/camlistore/go4
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
# -- end OCI image validation requirements
github.com/mtrmac/gpgme master
# openshift/origin's k8s dependencies as of OpenShift v1.1.5
github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
k8s.io/client-go master
github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee
gopkg.in/yaml.v2 d466437aa4adc35830964cffc5b5f262c63ddcb4
github.com/imdario/mergo 6633656539c1639d9d78127b7d47c622b5d7b6dc
# containers/storage's dependencies that aren't already being pulled in
github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
github.com/pborman/uuid v1.0
github.com/opencontainers/selinux master
golang.org/x/sys master
github.com/tchap/go-patricia v2.2.6
github.com/BurntSushi/toml master
@@ -1,6 +1,6 @@
-The MIT License
+The MIT License (MIT)
 
-Copyright (c) 2010-2014 Google, Inc. http://angularjs.org
+Copyright (c) 2013 TOML authors
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
vendor/github.com/BurntSushi/toml/README.md (generated, vendored, new file, 218 lines)
@@ -0,0 +1,218 @@
## TOML parser and encoder for Go with reflection

TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)

Spec: https://github.com/toml-lang/toml

Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)

Documentation: https://godoc.org/github.com/BurntSushi/toml

Installation:

```bash
go get github.com/BurntSushi/toml
```

Try the toml validator:

```bash
go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml
```

[Build Status](https://travis-ci.org/BurntSushi/toml) [GoDoc](https://godoc.org/github.com/BurntSushi/toml)

### Testing

This package passes all tests in
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
and the encoder.

### Examples

This package works similarly to how the Go standard library handles `XML`
and `JSON`. Namely, data is loaded into Go values via reflection.

For the simplest example, consider some TOML file as just a list of keys
and values:

```toml
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```

Which could be defined in Go as:

```go
type Config struct {
	Age        int
	Cats       []string
	Pi         float64
	Perfection []int
	DOB        time.Time // requires `import time`
}
```

And then decoded with:

```go
var conf Config
if _, err := toml.Decode(tomlData, &conf); err != nil {
	// handle error
}
```

You can also use struct tags if your struct field name doesn't map to a TOML
key value directly:

```toml
some_key_NAME = "wat"
```

```go
type TOML struct {
	ObscureKey string `toml:"some_key_NAME"`
}
```

### Using the `encoding.TextUnmarshaler` interface

Here's an example that automatically parses duration strings into
`time.Duration` values:

```toml
[[song]]
name = "Thunder Road"
duration = "4m49s"

[[song]]
name = "Stairway to Heaven"
duration = "8m03s"
```

Which can be decoded with:

```go
type song struct {
	Name     string
	Duration duration
}
type songs struct {
	Song []song
}
var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
	log.Fatal(err)
}

for _, s := range favorites.Song {
	fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```

And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface:

```go
type duration struct {
	time.Duration
}

func (d *duration) UnmarshalText(text []byte) error {
	var err error
	d.Duration, err = time.ParseDuration(string(text))
	return err
}
```
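The vendored README stops at decoding. As a hedged aside (not part of the upstream file): because the package also honors `encoding.TextMarshaler` when encoding, one more method on the same wrapper type would let it round-trip through the encoder, e.g.:

```go
// Sketch only: gives the duration type a text form so that
// toml.NewEncoder writes back the same "4m49s"-style strings.
func (d duration) MarshalText() ([]byte, error) {
	return []byte(d.Duration.String()), nil
}
```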
### More complex usage

Here's an example of how to load the example from the official spec page:

```toml
# This is a TOML document. Boom.

title = "TOML Example"

[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?

[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true

[servers]

  # You can indent as you please. Tabs or spaces. TOML don't care.
  [servers.alpha]
  ip = "10.0.0.1"
  dc = "eqdc10"

  [servers.beta]
  ip = "10.0.0.2"
  dc = "eqdc10"

[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it

# Line breaks are OK when inside arrays
hosts = [
  "alpha",
  "omega"
]
```

And the corresponding Go types are:

```go
type tomlConfig struct {
	Title   string
	Owner   ownerInfo
	DB      database `toml:"database"`
	Servers map[string]server
	Clients clients
}

type ownerInfo struct {
	Name string
	Org  string `toml:"organization"`
	Bio  string
	DOB  time.Time
}

type database struct {
	Server  string
	Ports   []int
	ConnMax int `toml:"connection_max"`
	Enabled bool
}

type server struct {
	IP string
	DC string
}

type clients struct {
	Data  [][]interface{}
	Hosts []string
}
```

Note that a case insensitive match will be tried if an exact match can't be
found.

A working example of the above can be found in `_examples/example.{go,toml}`.
509 vendor/github.com/BurntSushi/toml/decode.go generated vendored Normal file
@@ -0,0 +1,509 @@
package toml

import (
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"reflect"
	"strings"
	"time"
)

func e(format string, args ...interface{}) error {
	return fmt.Errorf("toml: "+format, args...)
}

// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
type Unmarshaler interface {
	UnmarshalTOML(interface{}) error
}

// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
func Unmarshal(p []byte, v interface{}) error {
	_, err := Decode(string(p), v)
	return err
}

// Primitive is a TOML value that hasn't been decoded into a Go value.
// When using the various `Decode*` functions, the type `Primitive` may
// be given to any value, and its decoding will be delayed.
//
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
//
// The underlying representation of a `Primitive` value is subject to change.
// Do not rely on it.
//
// N.B. Primitive values are still parsed, so using them will only avoid
// the overhead of reflection. They can be useful when you don't know the
// exact type of TOML data until run time.
type Primitive struct {
	undecoded interface{}
	context   Key
}

// DEPRECATED!
//
// Use MetaData.PrimitiveDecode instead.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
	md := MetaData{decoded: make(map[string]bool)}
	return md.unify(primValue.undecoded, rvalue(v))
}

// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
	md.context = primValue.context
	defer func() { md.context = nil }()
	return md.unify(primValue.undecoded, rvalue(v))
}

// Decode will decode the contents of `data` in TOML format into a pointer
// `v`.
//
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
// used interchangeably.)
//
// TOML arrays of tables correspond to either a slice of structs or a slice
// of maps.
//
// TOML datetimes correspond to Go `time.Time` values.
//
// All other TOML types (float, string, int, bool and array) correspond
// to the obvious Go types.
//
// An exception to the above rules is if a type implements the
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
// (floats, strings, integers, booleans and datetimes) will be converted to
// a byte string and given to the value's UnmarshalText method. See the
// Unmarshaler example for a demonstration with time duration strings.
//
// Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go
// struct. The special `toml` struct tag may be used to map TOML keys to
// struct fields that don't match the key name exactly. (See the example.)
// A case insensitive match to struct names will be tried if an exact match
// can't be found.
//
// The mapping between TOML values and Go values is loose. That is, there
// may exist TOML values that cannot be placed into your representation, and
// there may be parts of your representation that do not correspond to
// TOML values. This loose mapping can be made stricter by using the IsDefined
// and/or Undecoded methods on the MetaData returned.
//
// This decoder will not handle cyclic types. If a cyclic type is passed,
// `Decode` will not terminate.
func Decode(data string, v interface{}) (MetaData, error) {
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Ptr {
		return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
	}
	if rv.IsNil() {
		return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
	}
	p, err := parse(data)
	if err != nil {
		return MetaData{}, err
	}
	md := MetaData{
		p.mapping, p.types, p.ordered,
		make(map[string]bool, len(p.ordered)), nil,
	}
	return md, md.unify(p.mapping, indirect(rv))
}

// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at `fpath` and decode it for you.
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
	bs, err := ioutil.ReadFile(fpath)
	if err != nil {
		return MetaData{}, err
	}
	return Decode(string(bs), v)
}

// DecodeReader is just like Decode, except it will consume all bytes
// from the reader and decode it for you.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
	bs, err := ioutil.ReadAll(r)
	if err != nil {
		return MetaData{}, err
	}
	return Decode(string(bs), v)
}
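Not part of the vendored file, but a minimal caller-side sketch of the `Primitive`/`PrimitiveDecode` machinery documented above, assuming a hypothetical config whose concrete shape is only known at run time:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

const blob = `
kind = "tcp"
[server]
addr = "127.0.0.1:8080"
`

func main() {
	var cfg struct {
		Kind   string
		Server toml.Primitive // decoding of [server] is delayed
	}
	md, err := toml.Decode(blob, &cfg)
	if err != nil {
		log.Fatal(err)
	}
	if cfg.Kind == "tcp" {
		// Now that the kind is known, decode the delayed section.
		var srv struct{ Addr string }
		if err := md.PrimitiveDecode(cfg.Server, &srv); err != nil {
			log.Fatal(err)
		}
		fmt.Println(srv.Addr)
	}
}
```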
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {

	// Special case. Look for a `Primitive` value.
	if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
		// Save the undecoded data and the key context into the primitive
		// value.
		context := make(Key, len(md.context))
		copy(context, md.context)
		rv.Set(reflect.ValueOf(Primitive{
			undecoded: data,
			context:   context,
		}))
		return nil
	}

	// Special case. Unmarshaler Interface support.
	if rv.CanAddr() {
		if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
			return v.UnmarshalTOML(data)
		}
	}

	// Special case. Handle time.Time values specifically.
	// TODO: Remove this code when we decide to drop support for Go 1.1.
	// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
	// interfaces.
	if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
		return md.unifyDatetime(data, rv)
	}

	// Special case. Look for a value satisfying the TextUnmarshaler interface.
	if v, ok := rv.Interface().(TextUnmarshaler); ok {
		return md.unifyText(data, v)
	}
	// BUG(burntsushi)
	// The behavior here is incorrect whenever a Go type satisfies the
	// encoding.TextUnmarshaler interface but also corresponds to a TOML
	// hash or array. In particular, the unmarshaler should only be applied
	// to primitive TOML values. But at this point, it will be applied to
	// all kinds of values and produce an incorrect error whenever those values
	// are hashes or arrays (including arrays of tables).

	k := rv.Kind()

	// laziness
	if k >= reflect.Int && k <= reflect.Uint64 {
		return md.unifyInt(data, rv)
	}
	switch k {
	case reflect.Ptr:
		elem := reflect.New(rv.Type().Elem())
		err := md.unify(data, reflect.Indirect(elem))
		if err != nil {
			return err
		}
		rv.Set(elem)
		return nil
	case reflect.Struct:
		return md.unifyStruct(data, rv)
	case reflect.Map:
		return md.unifyMap(data, rv)
	case reflect.Array:
		return md.unifyArray(data, rv)
	case reflect.Slice:
		return md.unifySlice(data, rv)
	case reflect.String:
		return md.unifyString(data, rv)
	case reflect.Bool:
		return md.unifyBool(data, rv)
	case reflect.Interface:
		// we only support empty interfaces.
		if rv.NumMethod() > 0 {
			return e("unsupported type %s", rv.Type())
		}
		return md.unifyAnything(data, rv)
	case reflect.Float32:
		fallthrough
	case reflect.Float64:
		return md.unifyFloat64(data, rv)
	}
	return e("unsupported type %s", rv.Kind())
}

func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
	tmap, ok := mapping.(map[string]interface{})
	if !ok {
		if mapping == nil {
			return nil
		}
		return e("type mismatch for %s: expected table but found %T",
			rv.Type().String(), mapping)
	}

	for key, datum := range tmap {
		var f *field
		fields := cachedTypeFields(rv.Type())
		for i := range fields {
			ff := &fields[i]
			if ff.name == key {
				f = ff
				break
			}
			if f == nil && strings.EqualFold(ff.name, key) {
				f = ff
			}
		}
		if f != nil {
			subv := rv
			for _, i := range f.index {
				subv = indirect(subv.Field(i))
			}
			if isUnifiable(subv) {
				md.decoded[md.context.add(key).String()] = true
				md.context = append(md.context, key)
				if err := md.unify(datum, subv); err != nil {
					return err
				}
				md.context = md.context[0 : len(md.context)-1]
			} else if f.name != "" {
				// Bad user! No soup for you!
				return e("cannot write unexported field %s.%s",
					rv.Type().String(), f.name)
			}
		}
	}
	return nil
}

func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
	tmap, ok := mapping.(map[string]interface{})
	if !ok {
		if tmap == nil {
			return nil
		}
		return badtype("map", mapping)
	}
	if rv.IsNil() {
		rv.Set(reflect.MakeMap(rv.Type()))
	}
	for k, v := range tmap {
		md.decoded[md.context.add(k).String()] = true
		md.context = append(md.context, k)

		rvkey := indirect(reflect.New(rv.Type().Key()))
		rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
		if err := md.unify(v, rvval); err != nil {
			return err
		}
		md.context = md.context[0 : len(md.context)-1]

		rvkey.SetString(k)
		rv.SetMapIndex(rvkey, rvval)
	}
	return nil
}

func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
	datav := reflect.ValueOf(data)
	if datav.Kind() != reflect.Slice {
		if !datav.IsValid() {
			return nil
		}
		return badtype("slice", data)
	}
	sliceLen := datav.Len()
	if sliceLen != rv.Len() {
		return e("expected array length %d; got TOML array of length %d",
			rv.Len(), sliceLen)
	}
	return md.unifySliceArray(datav, rv)
}

func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
	datav := reflect.ValueOf(data)
	if datav.Kind() != reflect.Slice {
		if !datav.IsValid() {
			return nil
		}
		return badtype("slice", data)
	}
	n := datav.Len()
	if rv.IsNil() || rv.Cap() < n {
		rv.Set(reflect.MakeSlice(rv.Type(), n, n))
	}
	rv.SetLen(n)
	return md.unifySliceArray(datav, rv)
}

func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
	sliceLen := data.Len()
	for i := 0; i < sliceLen; i++ {
		v := data.Index(i).Interface()
		sliceval := indirect(rv.Index(i))
		if err := md.unify(v, sliceval); err != nil {
			return err
		}
	}
	return nil
}

func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
	if _, ok := data.(time.Time); ok {
		rv.Set(reflect.ValueOf(data))
		return nil
	}
	return badtype("time.Time", data)
}

func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
	if s, ok := data.(string); ok {
		rv.SetString(s)
		return nil
	}
	return badtype("string", data)
}

func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
	if num, ok := data.(float64); ok {
		switch rv.Kind() {
		case reflect.Float32:
			fallthrough
		case reflect.Float64:
			rv.SetFloat(num)
		default:
			panic("bug")
		}
		return nil
	}
	return badtype("float", data)
}

func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
	if num, ok := data.(int64); ok {
		if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
			switch rv.Kind() {
			case reflect.Int, reflect.Int64:
				// No bounds checking necessary.
			case reflect.Int8:
				if num < math.MinInt8 || num > math.MaxInt8 {
					return e("value %d is out of range for int8", num)
				}
			case reflect.Int16:
				if num < math.MinInt16 || num > math.MaxInt16 {
					return e("value %d is out of range for int16", num)
				}
			case reflect.Int32:
				if num < math.MinInt32 || num > math.MaxInt32 {
					return e("value %d is out of range for int32", num)
				}
			}
			rv.SetInt(num)
		} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
			unum := uint64(num)
			switch rv.Kind() {
			case reflect.Uint, reflect.Uint64:
				// No bounds checking necessary.
			case reflect.Uint8:
				if num < 0 || unum > math.MaxUint8 {
					return e("value %d is out of range for uint8", num)
				}
			case reflect.Uint16:
				if num < 0 || unum > math.MaxUint16 {
					return e("value %d is out of range for uint16", num)
				}
			case reflect.Uint32:
				if num < 0 || unum > math.MaxUint32 {
					return e("value %d is out of range for uint32", num)
				}
			}
			rv.SetUint(unum)
		} else {
			panic("unreachable")
		}
		return nil
	}
	return badtype("integer", data)
}

func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
	if b, ok := data.(bool); ok {
		rv.SetBool(b)
		return nil
	}
	return badtype("boolean", data)
}

func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
	rv.Set(reflect.ValueOf(data))
	return nil
}

func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
	var s string
	switch sdata := data.(type) {
	case TextMarshaler:
		text, err := sdata.MarshalText()
		if err != nil {
			return err
		}
		s = string(text)
	case fmt.Stringer:
		s = sdata.String()
	case string:
		s = sdata
	case bool:
		s = fmt.Sprintf("%v", sdata)
	case int64:
		s = fmt.Sprintf("%d", sdata)
	case float64:
		s = fmt.Sprintf("%f", sdata)
	default:
		return badtype("primitive (string-like)", data)
	}
	if err := v.UnmarshalText([]byte(s)); err != nil {
		return err
	}
	return nil
}

// rvalue returns a reflect.Value of `v`. All pointers are resolved.
func rvalue(v interface{}) reflect.Value {
	return indirect(reflect.ValueOf(v))
}

// indirect returns the value pointed to by a pointer.
// Pointers are followed until the value is not a pointer.
// New values are allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of
// interest to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value {
	if v.Kind() != reflect.Ptr {
		if v.CanSet() {
			pv := v.Addr()
			if _, ok := pv.Interface().(TextUnmarshaler); ok {
				return pv
			}
		}
		return v
	}
	if v.IsNil() {
		v.Set(reflect.New(v.Type().Elem()))
	}
	return indirect(reflect.Indirect(v))
}

func isUnifiable(rv reflect.Value) bool {
	if rv.CanSet() {
		return true
	}
	if _, ok := rv.Interface().(TextUnmarshaler); ok {
		return true
	}
	return false
}

func badtype(expected string, data interface{}) error {
	return e("cannot load TOML value of type %T into a Go %s", data, expected)
}
121 vendor/github.com/BurntSushi/toml/decode_meta.go generated vendored Normal file
@@ -0,0 +1,121 @@
package toml

import "strings"

// MetaData allows access to meta information about TOML data that may not
// be inferrable via reflection. In particular, whether a key has been defined
// and the TOML type of a key.
type MetaData struct {
	mapping map[string]interface{}
	types   map[string]tomlType
	keys    []Key
	decoded map[string]bool
	context Key // Used only during decoding.
}

// IsDefined returns true if the key given exists in the TOML data. The key
// should be specified hierarchically. e.g.,
//
//	// access the TOML key 'a.b.c'
//	IsDefined("a", "b", "c")
//
// IsDefined will return false if an empty key is given. Keys are case
// sensitive.
func (md *MetaData) IsDefined(key ...string) bool {
	if len(key) == 0 {
		return false
	}

	var hash map[string]interface{}
	var ok bool
	var hashOrVal interface{} = md.mapping
	for _, k := range key {
		if hash, ok = hashOrVal.(map[string]interface{}); !ok {
			return false
		}
		if hashOrVal, ok = hash[k]; !ok {
			return false
		}
	}
	return true
}

// Type returns a string representation of the type of the key specified.
//
// Type will return the empty string if given an empty key or a key that
// does not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
	fullkey := strings.Join(key, ".")
	if typ, ok := md.types[fullkey]; ok {
		return typ.typeString()
	}
	return ""
}

// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type.
type Key []string

func (k Key) String() string {
	return strings.Join(k, ".")
}

func (k Key) maybeQuotedAll() string {
	var ss []string
	for i := range k {
		ss = append(ss, k.maybeQuoted(i))
	}
	return strings.Join(ss, ".")
}

func (k Key) maybeQuoted(i int) string {
	quote := false
	for _, c := range k[i] {
		if !isBareKeyChar(c) {
			quote = true
			break
		}
	}
	if quote {
		return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
	}
	return k[i]
}

func (k Key) add(piece string) Key {
	newKey := make(Key, len(k)+1)
	copy(newKey, k)
	newKey[len(k)] = piece
	return newKey
}

// Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
//
// The list will have the same order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
	return md.keys
}

// Undecoded returns all keys that have not been decoded in the order in which
// they appear in the original TOML document.
//
// This includes keys that haven't been decoded because of a Primitive value.
// Once the Primitive value is decoded, the keys will be considered decoded.
//
// Also note that decoding into an empty interface will result in no decoding,
// and so no keys will be considered decoded.
//
// In this sense, the Undecoded keys correspond to keys in the TOML document
// that do not have a concrete type in your representation.
func (md *MetaData) Undecoded() []Key {
	undecoded := make([]Key, 0, len(md.keys))
	for _, key := range md.keys {
		if !md.decoded[key.String()] {
			undecoded = append(undecoded, key)
		}
	}
	return undecoded
}
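A caller-side sketch (not in the vendored file) of using `Undecoded` to make the decoder's loose mapping strict, rejecting any key the Go struct does not model; `strictDecode` is a hypothetical helper name:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// strictDecode decodes blob into v and fails if the document
// contains keys that v did not absorb.
func strictDecode(blob string, v interface{}) error {
	md, err := toml.Decode(blob, v)
	if err != nil {
		return err
	}
	if undec := md.Undecoded(); len(undec) > 0 {
		return fmt.Errorf("unknown keys in TOML: %v", undec)
	}
	return nil
}

func main() {
	var cfg struct{ Port int }
	err := strictDecode("port = 8080\ntypo = true\n", &cfg)
	fmt.Println(err) // reports the unexpected "typo" key
}
```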
27 vendor/github.com/BurntSushi/toml/doc.go generated vendored Normal file
@@ -0,0 +1,27 @@
/*
Package toml provides facilities for decoding and encoding TOML configuration
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.

The specification implemented: https://github.com/toml-lang/toml

The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the
type of each key in a TOML document.

Testing

There are two important types of tests used for this package. The first is
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.

The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test

The reason the tests are in a separate project is so that they can be used by
any implementation of TOML. Namely, it is language agnostic.
*/
package toml
568 vendor/github.com/BurntSushi/toml/encode.go generated vendored Normal file
@@ -0,0 +1,568 @@
package toml

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"time"
)

type tomlEncodeError struct{ error }

var (
	errArrayMixedElementTypes = errors.New(
		"toml: cannot encode array with mixed element types")
	errArrayNilElement = errors.New(
		"toml: cannot encode array with nil element")
	errNonString = errors.New(
		"toml: cannot encode a map with non-string key type")
	errAnonNonStruct = errors.New(
		"toml: cannot encode an anonymous field that is not a struct")
	errArrayNoTable = errors.New(
		"toml: TOML array element cannot contain a table")
	errNoKey = errors.New(
		"toml: top-level values must be Go maps or structs")
	errAnything = errors.New("") // used in testing
)

var quotedReplacer = strings.NewReplacer(
	"\t", "\\t",
	"\n", "\\n",
	"\r", "\\r",
	"\"", "\\\"",
	"\\", "\\\\",
)

// Encoder controls the encoding of Go values to a TOML document written to
// some io.Writer.
//
// The indentation level can be controlled with the Indent field.
type Encoder struct {
	// A single indentation level. By default it is two spaces.
	Indent string

	// hasWritten is whether we have written any output to w yet.
	hasWritten bool
	w          *bufio.Writer
}

// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
// given. By default, a single indentation level is 2 spaces.
func NewEncoder(w io.Writer) *Encoder {
	return &Encoder{
		w:      bufio.NewWriter(w),
		Indent: "  ",
	}
}

// Encode writes a TOML representation of the Go value to the underlying
// io.Writer. If the value given cannot be encoded to a valid TOML document,
// then an error is returned.
//
// The mapping between Go values and TOML values should be precisely the same
// as for the Decode* functions. Similarly, the TextMarshaler interface is
// supported by encoding the resulting bytes as strings. (If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.)
//
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
// sub-hashes are encoded first.
//
// If a Go map is encoded, then its keys are sorted alphabetically for
// deterministic output. More control over this behavior may be provided if
// there is demand for it.
//
// Encoding Go values without a corresponding TOML representation---like map
// types with non-string keys---will cause an error to be returned. Similarly
// for mixed arrays/slices, arrays/slices with nil elements, embedded
// non-struct types and nested slices containing maps or structs.
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
// and so is []map[string][]string.)
func (enc *Encoder) Encode(v interface{}) error {
	rv := eindirect(reflect.ValueOf(v))
	if err := enc.safeEncode(Key([]string{}), rv); err != nil {
		return err
	}
	return enc.w.Flush()
}
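Again not part of the vendored source: a small sketch of driving `NewEncoder`/`Encode` as documented above, with a hypothetical config struct.

```go
package main

import (
	"log"
	"os"

	"github.com/BurntSushi/toml"
)

func main() {
	cfg := struct {
		Title string
		Owner struct{ Name string }
	}{Title: "example"}
	cfg.Owner.Name = "someone"

	enc := toml.NewEncoder(os.Stdout)
	enc.Indent = "    " // override the default two-space indent
	if err := enc.Encode(cfg); err != nil {
		log.Fatal(err)
	}
	// Prints roughly:
	// Title = "example"
	//
	// [Owner]
	//     Name = "someone"
}
```

Note how the direct key `Title` comes out before the `[Owner]` sub-table, matching the "keys without any sub-hashes are encoded first" rule above.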
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if terr, ok := r.(tomlEncodeError); ok {
				err = terr.error
				return
			}
			panic(r)
		}
	}()
	enc.encode(key, rv)
	return nil
}

func (enc *Encoder) encode(key Key, rv reflect.Value) {
	// Special case. Time needs to be in ISO8601 format.
	// Special case. If we can marshal the type to text, then we use that.
	// Basically, this prevents the encoder from handling these types as
	// generic structs (or whatever the underlying type of a TextMarshaler is).
	switch rv.Interface().(type) {
	case time.Time, TextMarshaler:
		enc.keyEqElement(key, rv)
		return
	}

	k := rv.Kind()
	switch k {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
		reflect.Uint64,
		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
		enc.keyEqElement(key, rv)
	case reflect.Array, reflect.Slice:
		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
			enc.eArrayOfTables(key, rv)
		} else {
			enc.keyEqElement(key, rv)
		}
	case reflect.Interface:
		if rv.IsNil() {
			return
		}
		enc.encode(key, rv.Elem())
	case reflect.Map:
		if rv.IsNil() {
			return
		}
		enc.eTable(key, rv)
	case reflect.Ptr:
		if rv.IsNil() {
			return
		}
		enc.encode(key, rv.Elem())
	case reflect.Struct:
		enc.eTable(key, rv)
	default:
		panic(e("unsupported type for key '%s': %s", key, k))
	}
}

// eElement encodes any value that can be an array element (primitives and
// arrays).
func (enc *Encoder) eElement(rv reflect.Value) {
	switch v := rv.Interface().(type) {
	case time.Time:
		// Special case time.Time as a primitive. Has to come before
		// TextMarshaler below because time.Time implements
		// encoding.TextMarshaler, but we need to always use UTC.
		enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
		return
	case TextMarshaler:
		// Special case. Use text marshaler if it's available for this value.
		if s, err := v.MarshalText(); err != nil {
			encPanic(err)
		} else {
			enc.writeQuoted(string(s))
		}
		return
	}
	switch rv.Kind() {
	case reflect.Bool:
		enc.wf(strconv.FormatBool(rv.Bool()))
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Int64:
		enc.wf(strconv.FormatInt(rv.Int(), 10))
	case reflect.Uint, reflect.Uint8, reflect.Uint16,
		reflect.Uint32, reflect.Uint64:
		enc.wf(strconv.FormatUint(rv.Uint(), 10))
	case reflect.Float32:
		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
	case reflect.Float64:
		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
	case reflect.Array, reflect.Slice:
		enc.eArrayOrSliceElement(rv)
	case reflect.Interface:
		enc.eElement(rv.Elem())
	case reflect.String:
		enc.writeQuoted(rv.String())
	default:
		panic(e("unexpected primitive type: %s", rv.Kind()))
	}
}

// By the TOML spec, all floats must have a decimal with at least one
// number on either side.
func floatAddDecimal(fstr string) string {
	if !strings.Contains(fstr, ".") {
		return fstr + ".0"
	}
	return fstr
}

func (enc *Encoder) writeQuoted(s string) {
	enc.wf("\"%s\"", quotedReplacer.Replace(s))
}

func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
	length := rv.Len()
	enc.wf("[")
	for i := 0; i < length; i++ {
		elem := rv.Index(i)
		enc.eElement(elem)
		if i != length-1 {
			enc.wf(", ")
		}
	}
	enc.wf("]")
}

func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
	if len(key) == 0 {
		encPanic(errNoKey)
	}
	for i := 0; i < rv.Len(); i++ {
		trv := rv.Index(i)
		if isNil(trv) {
			continue
		}
		panicIfInvalidKey(key)
		enc.newline()
		enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
		enc.newline()
		enc.eMapOrStruct(key, trv)
	}
}

func (enc *Encoder) eTable(key Key, rv reflect.Value) {
	panicIfInvalidKey(key)
	if len(key) == 1 {
		// Output an extra newline between top-level tables.
		// (The newline isn't written if nothing else has been written though.)
		enc.newline()
	}
	if len(key) > 0 {
		enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
		enc.newline()
	}
	enc.eMapOrStruct(key, rv)
}

func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
	switch rv := eindirect(rv); rv.Kind() {
	case reflect.Map:
		enc.eMap(key, rv)
	case reflect.Struct:
		enc.eStruct(key, rv)
	default:
		panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
	}
}

func (enc *Encoder) eMap(key Key, rv reflect.Value) {
	rt := rv.Type()
	if rt.Key().Kind() != reflect.String {
		encPanic(errNonString)
	}

	// Sort keys so that we have deterministic output. And write keys directly
	// underneath this key first, before writing sub-structs or sub-maps.
	var mapKeysDirect, mapKeysSub []string
	for _, mapKey := range rv.MapKeys() {
		k := mapKey.String()
		if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
			mapKeysSub = append(mapKeysSub, k)
		} else {
			mapKeysDirect = append(mapKeysDirect, k)
		}
	}

	var writeMapKeys = func(mapKeys []string) {
		sort.Strings(mapKeys)
		for _, mapKey := range mapKeys {
			mrv := rv.MapIndex(reflect.ValueOf(mapKey))
			if isNil(mrv) {
				// Don't write anything for nil fields.
				continue
			}
			enc.encode(key.add(mapKey), mrv)
		}
	}
	writeMapKeys(mapKeysDirect)
	writeMapKeys(mapKeysSub)
}

func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
	// Write keys for fields directly under this key first, because if we write
	// a field that creates a new table, then all keys under it will be in that
	// table (not the one we're writing here).
	rt := rv.Type()
	var fieldsDirect, fieldsSub [][]int
	var addFields func(rt reflect.Type, rv reflect.Value, start []int)
	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
		for i := 0; i < rt.NumField(); i++ {
			f := rt.Field(i)
			// skip unexported fields
			if f.PkgPath != "" && !f.Anonymous {
				continue
			}
			frv := rv.Field(i)
			if f.Anonymous {
				t := f.Type
				switch t.Kind() {
				case reflect.Struct:
					// Treat anonymous struct fields with
					// tag names as though they are not
					// anonymous, like encoding/json does.
					if getOptions(f.Tag).name == "" {
						addFields(t, frv, f.Index)
						continue
					}
				case reflect.Ptr:
					if t.Elem().Kind() == reflect.Struct &&
						getOptions(f.Tag).name == "" {
						if !frv.IsNil() {
							addFields(t.Elem(), frv.Elem(), f.Index)
						}
						continue
					}
					// Fall through to the normal field encoding logic below
					// for non-struct anonymous fields.
				}
			}

			if typeIsHash(tomlTypeOfGo(frv)) {
				fieldsSub = append(fieldsSub, append(start, f.Index...))
			} else {
				fieldsDirect = append(fieldsDirect, append(start, f.Index...))
			}
		}
	}
	addFields(rt, rv, nil)

	var writeFields = func(fields [][]int) {
		for _, fieldIndex := range fields {
			sft := rt.FieldByIndex(fieldIndex)
			sf := rv.FieldByIndex(fieldIndex)
			if isNil(sf) {
				// Don't write anything for nil fields.
				continue
			}

			opts := getOptions(sft.Tag)
			if opts.skip {
				continue
			}
			keyName := sft.Name
			if opts.name != "" {
				keyName = opts.name
			}
			if opts.omitempty && isEmpty(sf) {
				continue
			}
			if opts.omitzero && isZero(sf) {
				continue
			}

			enc.encode(key.add(keyName), sf)
		}
	}
	writeFields(fieldsDirect)
	writeFields(fieldsSub)
}

// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
// which means no concrete TOML type could be found. It is also used to
// determine whether the types of array elements are mixed (which is
// forbidden).
func tomlTypeOfGo(rv reflect.Value) tomlType {
	if isNil(rv) || !rv.IsValid() {
		return nil
	}
	switch rv.Kind() {
	case reflect.Bool:
		return tomlBool
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
		reflect.Uint64:
		return tomlInteger
	case reflect.Float32, reflect.Float64:
		return tomlFloat
	case reflect.Array, reflect.Slice:
		if typeEqual(tomlHash, tomlArrayType(rv)) {
			return tomlArrayHash
		}
		return tomlArray
	case reflect.Ptr, reflect.Interface:
		return tomlTypeOfGo(rv.Elem())
	case reflect.String:
		return tomlString
	case reflect.Map:
		return tomlHash
	case reflect.Struct:
		switch rv.Interface().(type) {
		case time.Time:
			return tomlDatetime
		case TextMarshaler:
			return tomlString
		default:
			return tomlHash
		}
	default:
		panic("unexpected reflect.Kind: " + rv.Kind().String())
	}
}

// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero-length
// slice). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
		return nil
	}
	firstType := tomlTypeOfGo(rv.Index(0))
	if firstType == nil {
		encPanic(errArrayNilElement)
	}

	rvlen := rv.Len()
	for i := 1; i < rvlen; i++ {
		elem := rv.Index(i)
		switch elemType := tomlTypeOfGo(elem); {
		case elemType == nil:
			encPanic(errArrayNilElement)
		case !typeEqual(firstType, elemType):
			encPanic(errArrayMixedElementTypes)
		}
	}
	// If we have a nested array, then we must make sure that the nested
	// array contains ONLY primitives.
	// This checks arbitrarily nested arrays.
	if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
		nest := tomlArrayType(eindirect(rv.Index(0)))
		if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
			encPanic(errArrayNoTable)
		}
	}
	return firstType
}

type tagOptions struct {
	skip      bool // "-"
	name      string
	omitempty bool
	omitzero  bool
}

func getOptions(tag reflect.StructTag) tagOptions {
	t := tag.Get("toml")
	if t == "-" {
		return tagOptions{skip: true}
	}
	var opts tagOptions
	parts := strings.Split(t, ",")
	opts.name = parts[0]
	for _, s := range parts[1:] {
		switch s {
		case "omitempty":
			opts.omitempty = true
		case "omitzero":
			opts.omitzero = true
		}
	}
	return opts
}
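For reference (illustrative, not vendored code), a struct whose tags exercise each branch that `getOptions` parses:

```go
// settings is a hypothetical type showing the four tag options above.
type settings struct {
	Internal string   `toml:"-"`                // skip: never encoded
	Name     string   `toml:"display_name"`     // name: renamed key
	Tags     []string `toml:",omitempty"`       // omitempty: dropped when empty
	Retries  int      `toml:"retries,omitzero"` // omitzero: dropped when zero
}
```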
func isZero(rv reflect.Value) bool {
	switch rv.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return rv.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return rv.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return rv.Float() == 0.0
	}
	return false
}

func isEmpty(rv reflect.Value) bool {
	switch rv.Kind() {
	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
		return rv.Len() == 0
	case reflect.Bool:
		return !rv.Bool()
	}
	return false
}

func (enc *Encoder) newline() {
	if enc.hasWritten {
		enc.wf("\n")
	}
}

func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
	if len(key) == 0 {
		encPanic(errNoKey)
	}
	panicIfInvalidKey(key)
	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
	enc.eElement(val)
	enc.newline()
}

func (enc *Encoder) wf(format string, v ...interface{}) {
	if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
		encPanic(err)
	}
	enc.hasWritten = true
}

func (enc *Encoder) indentStr(key Key) string {
	return strings.Repeat(enc.Indent, len(key)-1)
}

func encPanic(err error) {
	panic(tomlEncodeError{err})
}

func eindirect(v reflect.Value) reflect.Value {
	switch v.Kind() {
	case reflect.Ptr, reflect.Interface:
		return eindirect(v.Elem())
	default:
		return v
	}
}

func isNil(rv reflect.Value) bool {
	switch rv.Kind() {
	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		return rv.IsNil()
	default:
		return false
	}
}

func panicIfInvalidKey(key Key) {
	for _, k := range key {
		if len(k) == 0 {
			encPanic(e("Key '%s' is not a valid table name. Key names "+
				"cannot be empty.", key.maybeQuotedAll()))
		}
	}
}

func isValidKeyName(s string) bool {
	return len(s) != 0
}
19 vendor/github.com/BurntSushi/toml/encoding_types.go generated vendored Normal file
@@ -0,0 +1,19 @@
// +build go1.2

package toml

// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.

import (
	"encoding"
)

// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler

// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler
18 vendor/github.com/BurntSushi/toml/encoding_types_1.1.go generated vendored Normal file
@@ -0,0 +1,18 @@
// +build !go1.2

package toml

// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.

// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
	MarshalText() (text []byte, err error)
}

// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
	UnmarshalText(text []byte) error
}
953 vendor/github.com/BurntSushi/toml/lex.go generated vendored Normal file
@@ -0,0 +1,953 @@
package toml

import (
	"fmt"
	"strings"
	"unicode"
	"unicode/utf8"
)

type itemType int

const (
	itemError itemType = iota
	itemNIL   // used in the parser to indicate no type
	itemEOF
	itemText
	itemString
	itemRawString
	itemMultilineString
	itemRawMultilineString
	itemBool
	itemInteger
	itemFloat
	itemDatetime
	itemArray // the start of an array
	itemArrayEnd
	itemTableStart
	itemTableEnd
	itemArrayTableStart
	itemArrayTableEnd
	itemKeyStart
	itemCommentStart
	itemInlineTableStart
	itemInlineTableEnd
)

const (
	eof              = 0
	comma            = ','
	tableStart       = '['
	tableEnd         = ']'
	arrayTableStart  = '['
	arrayTableEnd    = ']'
	tableSep         = '.'
	keySep           = '='
	arrayStart       = '['
	arrayEnd         = ']'
	commentStart     = '#'
	stringStart      = '"'
	stringEnd        = '"'
	rawStringStart   = '\''
	rawStringEnd     = '\''
	inlineTableStart = '{'
	inlineTableEnd   = '}'
)

type stateFn func(lx *lexer) stateFn

type lexer struct {
	input string
	start int
	pos   int
	line  int
	state stateFn
	items chan item

	// Allow for backing up up to three runes.
	// This is necessary because TOML contains 3-rune tokens (""" and ''').
	prevWidths [3]int
	nprev      int // how many of prevWidths are in use
	// If we emit an eof, we can still back up, but it is not OK to call
	// next again.
	atEOF bool

	// A stack of state functions used to maintain context.
	// The idea is to reuse parts of the state machine in various places.
	// For example, values can appear at the top level or within arbitrarily
	// nested arrays. The last state on the stack is used after a value has
	// been lexed. Similarly for comments.
	stack []stateFn
}

type item struct {
	typ  itemType
	val  string
	line int
}

func (lx *lexer) nextItem() item {
	for {
		select {
		case item := <-lx.items:
			return item
		default:
			lx.state = lx.state(lx)
		}
	}
}
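A package-internal sketch (assuming access to these unexported types, plus the `lex` constructor defined just below) of how a parser drains the lexer: `nextItem` lazily steps the state machine until an item has been buffered on the channel.

```go
// dumpItems is a hypothetical driver; the package's real parser
// performs the same loop, switching on each item's type.
func dumpItems(input string) {
	lx := lex(input)
	for {
		it := lx.nextItem()
		if it.typ == itemEOF || it.typ == itemError {
			return
		}
		fmt.Printf("line %d: item %d = %q\n", it.line, it.typ, it.val)
	}
}
```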
|
||||
func lex(input string) *lexer {
|
||||
lx := &lexer{
|
||||
input: input,
|
||||
state: lexTop,
|
||||
line: 1,
|
||||
items: make(chan item, 10),
|
||||
stack: make([]stateFn, 0, 10),
|
||||
}
|
||||
return lx
|
||||
}
|
||||
|
||||
func (lx *lexer) push(state stateFn) {
|
||||
lx.stack = append(lx.stack, state)
|
||||
}
|
||||
|
||||
func (lx *lexer) pop() stateFn {
|
||||
if len(lx.stack) == 0 {
|
||||
return lx.errorf("BUG in lexer: no states to pop")
|
||||
}
|
||||
last := lx.stack[len(lx.stack)-1]
|
||||
lx.stack = lx.stack[0 : len(lx.stack)-1]
|
||||
return last
|
||||
}
|
||||
|
||||
func (lx *lexer) current() string {
|
||||
return lx.input[lx.start:lx.pos]
|
||||
}
|
||||
|
||||
func (lx *lexer) emit(typ itemType) {
|
||||
lx.items <- item{typ, lx.current(), lx.line}
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
func (lx *lexer) emitTrim(typ itemType) {
|
||||
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
func (lx *lexer) next() (r rune) {
|
||||
if lx.atEOF {
|
||||
panic("next called after EOF")
|
||||
}
|
||||
if lx.pos >= len(lx.input) {
|
||||
lx.atEOF = true
|
||||
return eof
|
||||
}
|
||||
|
||||
if lx.input[lx.pos] == '\n' {
|
||||
lx.line++
|
||||
}
|
||||
lx.prevWidths[2] = lx.prevWidths[1]
|
||||
lx.prevWidths[1] = lx.prevWidths[0]
|
||||
if lx.nprev < 3 {
|
||||
lx.nprev++
|
||||
}
|
||||
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
|
||||
lx.prevWidths[0] = w
|
||||
lx.pos += w
|
||||
return r
|
||||
}
|
||||
|
||||
// ignore skips over the pending input before this point.
|
||||
func (lx *lexer) ignore() {
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
// backup steps back one rune. Can be called only twice between calls to next.
|
||||
func (lx *lexer) backup() {
|
||||
if lx.atEOF {
|
||||
lx.atEOF = false
|
||||
return
|
||||
}
|
||||
if lx.nprev < 1 {
|
||||
panic("backed up too far")
|
||||
}
|
||||
w := lx.prevWidths[0]
|
||||
lx.prevWidths[0] = lx.prevWidths[1]
|
||||
lx.prevWidths[1] = lx.prevWidths[2]
|
||||
lx.nprev--
|
||||
lx.pos -= w
|
||||
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
|
||||
lx.line--
|
||||
}
|
||||
}
|
||||
|
||||
// accept consumes the next rune if it's equal to `valid`.
|
||||
func (lx *lexer) accept(valid rune) bool {
|
||||
if lx.next() == valid {
|
||||
return true
|
||||
}
|
||||
lx.backup()
|
||||
return false
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (lx *lexer) peek() rune {
|
||||
r := lx.next()
|
||||
lx.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// skip ignores all input that matches the given predicate.
|
||||
func (lx *lexer) skip(pred func(rune) bool) {
|
||||
for {
|
||||
r := lx.next()
|
||||
if pred(r) {
|
||||
continue
|
||||
}
|
||||
lx.backup()
|
||||
lx.ignore()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// errorf stops all lexing by emitting an error and returning `nil`.
|
||||
// Note that any value that is a character is escaped if it's a special
|
||||
// character (newlines, tabs, etc.).
|
||||
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
|
||||
lx.items <- item{
|
||||
itemError,
|
||||
fmt.Sprintf(format, values...),
|
||||
lx.line,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// lexTop consumes elements at the top level of TOML data.
|
||||
func lexTop(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isWhitespace(r) || isNL(r) {
|
||||
return lexSkip(lx, lexTop)
|
||||
}
|
||||
switch r {
|
||||
case commentStart:
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
case tableStart:
|
||||
return lexTableStart
|
||||
case eof:
|
||||
if lx.pos > lx.start {
|
||||
return lx.errorf("unexpected EOF")
|
||||
}
|
||||
lx.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
|
||||
// At this point, the only valid item can be a key, so we back up
|
||||
// and let the key lexer do the rest.
|
||||
lx.backup()
|
||||
lx.push(lexTopEnd)
|
||||
return lexKeyStart
|
||||
}
|
||||
|
||||
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
|
||||
// or a table.) It must see only whitespace, and will turn back to lexTop
|
||||
// upon a newline. If it sees EOF, it will quit the lexer successfully.
|
||||
func lexTopEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == commentStart:
|
||||
// a comment will read to a newline for us.
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
case isWhitespace(r):
|
||||
return lexTopEnd
|
||||
case isNL(r):
|
||||
lx.ignore()
|
||||
return lexTop
|
||||
case r == eof:
|
||||
lx.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
return lx.errorf("expected a top-level item to end with a newline, "+
|
||||
"comment, or EOF, but got %q instead", r)
|
||||
}
|
||||
|
||||
// lexTable lexes the beginning of a table. Namely, it makes sure that
|
||||
// it starts with a character other than '.' and ']'.
|
||||
// It assumes that '[' has already been consumed.
|
||||
// It also handles the case that this is an item in an array of tables.
|
||||
// e.g., '[[name]]'.
|
||||
func lexTableStart(lx *lexer) stateFn {
|
||||
if lx.peek() == arrayTableStart {
|
||||
lx.next()
|
||||
lx.emit(itemArrayTableStart)
|
||||
lx.push(lexArrayTableEnd)
|
||||
} else {
|
||||
lx.emit(itemTableStart)
|
||||
lx.push(lexTableEnd)
|
||||
}
|
||||
return lexTableNameStart
|
||||
}
|
||||
|
||||
func lexTableEnd(lx *lexer) stateFn {
|
||||
lx.emit(itemTableEnd)
|
||||
return lexTopEnd
|
||||
}
|
||||
|
||||
func lexArrayTableEnd(lx *lexer) stateFn {
|
||||
if r := lx.next(); r != arrayTableEnd {
|
||||
return lx.errorf("expected end of table array name delimiter %q, "+
|
||||
"but got %q instead", arrayTableEnd, r)
|
||||
}
|
||||
lx.emit(itemArrayTableEnd)
|
||||
return lexTopEnd
|
||||
}

func lexTableNameStart(lx *lexer) stateFn {
	lx.skip(isWhitespace)
	switch r := lx.peek(); {
	case r == tableEnd || r == eof:
		return lx.errorf("unexpected end of table name " +
			"(table names cannot be empty)")
	case r == tableSep:
		return lx.errorf("unexpected table separator " +
			"(table names cannot be empty)")
	case r == stringStart || r == rawStringStart:
		lx.ignore()
		lx.push(lexTableNameEnd)
		return lexValue // reuse string lexing
	default:
		return lexBareTableName
	}
}

// lexBareTableName lexes the name of a table. It assumes that at least one
// valid character for the table has already been read.
func lexBareTableName(lx *lexer) stateFn {
	r := lx.next()
	if isBareKeyChar(r) {
		return lexBareTableName
	}
	lx.backup()
	lx.emit(itemText)
	return lexTableNameEnd
}

// lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace.
func lexTableNameEnd(lx *lexer) stateFn {
	lx.skip(isWhitespace)
	switch r := lx.next(); {
	case isWhitespace(r):
		return lexTableNameEnd
	case r == tableSep:
		lx.ignore()
		return lexTableNameStart
	case r == tableEnd:
		return lx.pop()
	default:
		return lx.errorf("expected '.' or ']' to end table name, "+
			"but got %q instead", r)
	}
}

// lexKeyStart ignores leading whitespace and newlines, then begins lexing a
// key, which may be bare or quoted.
func lexKeyStart(lx *lexer) stateFn {
	r := lx.peek()
	switch {
	case r == keySep:
		return lx.errorf("unexpected key separator %q", keySep)
	case isWhitespace(r) || isNL(r):
		lx.next()
		return lexSkip(lx, lexKeyStart)
	case r == stringStart || r == rawStringStart:
		lx.ignore()
		lx.emit(itemKeyStart)
		lx.push(lexKeyEnd)
		return lexValue // reuse string lexing
	default:
		lx.ignore()
		lx.emit(itemKeyStart)
		return lexBareKey
	}
}

// lexBareKey consumes the text of a bare key. Assumes that the first character
// (which is not whitespace) has not yet been consumed.
func lexBareKey(lx *lexer) stateFn {
	switch r := lx.next(); {
	case isBareKeyChar(r):
		return lexBareKey
	case isWhitespace(r):
		lx.backup()
		lx.emit(itemText)
		return lexKeyEnd
	case r == keySep:
		lx.backup()
		lx.emit(itemText)
		return lexKeyEnd
	default:
		return lx.errorf("bare keys cannot contain %q", r)
	}
}

// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator).
func lexKeyEnd(lx *lexer) stateFn {
	switch r := lx.next(); {
	case r == keySep:
		return lexSkip(lx, lexValue)
	case isWhitespace(r):
		return lexSkip(lx, lexKeyEnd)
	default:
		return lx.errorf("expected key separator %q, but got %q instead",
			keySep, r)
	}
}

// lexValue starts the consumption of a value anywhere a value is expected.
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the stack is popped and returned.
func lexValue(lx *lexer) stateFn {
	// We allow whitespace to precede a value, but NOT newlines.
	// In array syntax, the array states are responsible for ignoring newlines.
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexValue)
	case isDigit(r):
		lx.backup() // avoid an extra state and use the same as above
		return lexNumberOrDateStart
	}
	switch r {
	case arrayStart:
		lx.ignore()
		lx.emit(itemArray)
		return lexArrayValue
	case inlineTableStart:
		lx.ignore()
		lx.emit(itemInlineTableStart)
		return lexInlineTableValue
	case stringStart:
		if lx.accept(stringStart) {
			if lx.accept(stringStart) {
				lx.ignore() // Ignore """
				return lexMultilineString
			}
			lx.backup()
		}
		lx.ignore() // ignore the '"'
		return lexString
	case rawStringStart:
		if lx.accept(rawStringStart) {
			if lx.accept(rawStringStart) {
				lx.ignore() // Ignore '''
				return lexMultilineRawString
			}
			lx.backup()
		}
		lx.ignore() // ignore the "'"
		return lexRawString
	case '+', '-':
		return lexNumberStart
	case '.': // special error case, be kind to users
		return lx.errorf("floats must start with a digit, not '.'")
	}
	if unicode.IsLetter(r) {
		// Be permissive here; lexBool will give a nice error if the
		// user wrote something like
		//   x = foo
		// (i.e., not 'true' or 'false' but something else word-like.)
		lx.backup()
		return lexBool
	}
	return lx.errorf("expected value but found %q instead", r)
}

// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and newlines are ignored.
func lexArrayValue(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r) || isNL(r):
		return lexSkip(lx, lexArrayValue)
	case r == commentStart:
		lx.push(lexArrayValue)
		return lexCommentStart
	case r == comma:
		return lx.errorf("unexpected comma")
	case r == arrayEnd:
		// NOTE(caleb): The spec isn't clear about whether you can have
		// a trailing comma or not, so we'll allow it.
		return lexArrayEnd
	}

	lx.backup()
	lx.push(lexArrayValueEnd)
	return lexValue
}

// lexArrayValueEnd consumes everything between the end of an array value and
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r) || isNL(r):
		return lexSkip(lx, lexArrayValueEnd)
	case r == commentStart:
		lx.push(lexArrayValueEnd)
		return lexCommentStart
	case r == comma:
		lx.ignore()
		return lexArrayValue // move on to the next value
	case r == arrayEnd:
		return lexArrayEnd
	}
	return lx.errorf(
		"expected a comma or array terminator %q, but got %q instead",
		arrayEnd, r,
	)
}

// lexArrayEnd finishes the lexing of an array.
// It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemArrayEnd)
	return lx.pop()
}
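
The array states above lean on the lexer's state stack: lexArrayValue pushes lexArrayValueEnd before handing off to lexValue, and the terminal state returns lx.pop() to resume whatever pushed it. A toy, self-contained sketch of that stack discipline; the toy type, lexGroup, and the input are illustrative, not part of this package:

package main

import "fmt"

type state func(*toy) state

type toy struct {
	input string
	pos   int
	stack []state
}

func (t *toy) push(s state) { t.stack = append(t.stack, s) }

func (t *toy) pop() state {
	s := t.stack[len(t.stack)-1]
	t.stack = t.stack[:len(t.stack)-1]
	return s
}

// lexGroup consumes a parenthesized group; on ')' it pops whatever state
// was pushed before the group was entered.
func lexGroup(t *toy) state {
	switch t.input[t.pos] {
	case '(':
		t.pos++
		t.push(lexGroup) // resume this group after the nested one ends
		return lexGroup
	case ')':
		t.pos++
		return t.pop()
	default:
		t.pos++
		return lexGroup
	}
}

func main() {
	t := &toy{input: "(a(b))"}
	t.push(nil) // bottom of the stack: a nil state stops the driver loop
	t.pos++     // pretend the opening '(' was already consumed
	for s := state(lexGroup); s != nil; {
		s = s(t)
	}
	fmt.Println("consumed", t.pos, "of", len(t.input), "bytes")
}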

// lexInlineTableValue consumes one key/value pair in an inline table.
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
func lexInlineTableValue(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexInlineTableValue)
	case isNL(r):
		return lx.errorf("newlines not allowed within inline tables")
	case r == commentStart:
		lx.push(lexInlineTableValue)
		return lexCommentStart
	case r == comma:
		return lx.errorf("unexpected comma")
	case r == inlineTableEnd:
		return lexInlineTableEnd
	}
	lx.backup()
	lx.push(lexInlineTableValueEnd)
	return lexKeyStart
}

// lexInlineTableValueEnd consumes everything between the end of an inline table
// key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexInlineTableValueEnd)
	case isNL(r):
		return lx.errorf("newlines not allowed within inline tables")
	case r == commentStart:
		lx.push(lexInlineTableValueEnd)
		return lexCommentStart
	case r == comma:
		lx.ignore()
		return lexInlineTableValue
	case r == inlineTableEnd:
		return lexInlineTableEnd
	}
	return lx.errorf("expected a comma or an inline table terminator %q, "+
		"but got %q instead", inlineTableEnd, r)
}

// lexInlineTableEnd finishes the lexing of an inline table.
// It assumes that a '}' has just been consumed.
func lexInlineTableEnd(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemInlineTableEnd)
	return lx.pop()
}

// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == eof:
		return lx.errorf("unexpected EOF")
	case isNL(r):
		return lx.errorf("strings cannot contain newlines")
	case r == '\\':
		lx.push(lexString)
		return lexStringEscape
	case r == stringEnd:
		lx.backup()
		lx.emit(itemString)
		lx.next()
		lx.ignore()
		return lx.pop()
	}
	return lexString
}

// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
	switch lx.next() {
	case eof:
		return lx.errorf("unexpected EOF")
	case '\\':
		return lexMultilineStringEscape
	case stringEnd:
		if lx.accept(stringEnd) {
			if lx.accept(stringEnd) {
				// Step back over the three closing quotes so they are
				// excluded from the emitted item, then consume and
				// discard them.
				lx.backup()
				lx.backup()
				lx.backup()
				lx.emit(itemMultilineString)
				lx.next()
				lx.next()
				lx.next()
				lx.ignore()
				return lx.pop()
			}
			lx.backup()
		}
	}
	return lexMultilineString
}

// lexRawString consumes a raw string. Nothing can be escaped in such a string.
// It assumes that the beginning "'" has already been consumed and ignored.
func lexRawString(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == eof:
		return lx.errorf("unexpected EOF")
	case isNL(r):
		return lx.errorf("strings cannot contain newlines")
	case r == rawStringEnd:
		lx.backup()
		lx.emit(itemRawString)
		lx.next()
		lx.ignore()
		return lx.pop()
	}
	return lexRawString
}

// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
	switch lx.next() {
	case eof:
		return lx.errorf("unexpected EOF")
	case rawStringEnd:
		if lx.accept(rawStringEnd) {
			if lx.accept(rawStringEnd) {
				lx.backup()
				lx.backup()
				lx.backup()
				lx.emit(itemRawMultilineString)
				lx.next()
				lx.next()
				lx.next()
				lx.ignore()
				return lx.pop()
			}
			lx.backup()
		}
	}
	return lexMultilineRawString
}

// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
	// Handle the special case first: an escaped newline (a line-ending
	// backslash) is legal inside a multiline string.
	if isNL(lx.next()) {
		return lexMultilineString
	}
	lx.backup()
	lx.push(lexMultilineString)
	return lexStringEscape(lx)
}

func lexStringEscape(lx *lexer) stateFn {
	r := lx.next()
	switch r {
	case 'b', 't', 'n', 'f', 'r', '"', '\\':
		return lx.pop()
	case 'u':
		return lexShortUnicodeEscape
	case 'U':
		return lexLongUnicodeEscape
	}
	return lx.errorf("invalid escape character %q; only the following "+
		"escape characters are allowed: "+
		`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
}

func lexShortUnicodeEscape(lx *lexer) stateFn {
	var r rune
	for i := 0; i < 4; i++ {
		r = lx.next()
		if !isHexadecimal(r) {
			return lx.errorf(`expected four hexadecimal digits after '\u', `+
				"but got %q instead", lx.current())
		}
	}
	return lx.pop()
}

func lexLongUnicodeEscape(lx *lexer) stateFn {
	var r rune
	for i := 0; i < 8; i++ {
		r = lx.next()
		if !isHexadecimal(r) {
			return lx.errorf(`expected eight hexadecimal digits after '\U', `+
				"but got %q instead", lx.current())
		}
	}
	return lx.pop()
}

// lexNumberOrDateStart consumes either an integer, a float, or a datetime.
func lexNumberOrDateStart(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexNumberOrDate
	}
	switch r {
	case '_':
		return lexNumber
	case 'e', 'E':
		return lexFloat
	case '.':
		return lx.errorf("floats must start with a digit, not '.'")
	}
	return lx.errorf("expected a digit but got %q", r)
}

// lexNumberOrDate consumes either an integer, float or datetime.
func lexNumberOrDate(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexNumberOrDate
	}
	switch r {
	case '-':
		return lexDatetime
	case '_':
		return lexNumber
	case '.', 'e', 'E':
		return lexFloat
	}

	lx.backup()
	lx.emit(itemInteger)
	return lx.pop()
}

// lexDatetime consumes a Datetime, to a first approximation.
// The parser validates that it matches one of the accepted formats.
func lexDatetime(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexDatetime
	}
	switch r {
	case '-', 'T', ':', '.', 'Z', '+':
		return lexDatetime
	}

	lx.backup()
	lx.emit(itemDatetime)
	return lx.pop()
}

// lexNumberStart consumes either an integer or a float. It assumes that a sign
// has already been read, but that *no* digits have been consumed.
// lexNumberStart will move to the appropriate integer or float states.
func lexNumberStart(lx *lexer) stateFn {
	// We MUST see a digit. Even floats have to start with a digit.
	r := lx.next()
	if !isDigit(r) {
		if r == '.' {
			return lx.errorf("floats must start with a digit, not '.'")
		}
		return lx.errorf("expected a digit but got %q", r)
	}
	return lexNumber
}

// lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexNumber
	}
	switch r {
	case '_':
		return lexNumber
	case '.', 'e', 'E':
		return lexFloat
	}

	lx.backup()
	lx.emit(itemInteger)
	return lx.pop()
}

// lexFloat consumes the elements of a float. It allows any sequence of
// float-like characters, so floats emitted by the lexer are only a first
// approximation and must be validated by the parser.
func lexFloat(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexFloat
	}
	switch r {
	case '_', '.', '-', '+', 'e', 'E':
		return lexFloat
	}

	lx.backup()
	lx.emit(itemFloat)
	return lx.pop()
}

// lexBool consumes a bool string: 'true' or 'false'.
func lexBool(lx *lexer) stateFn {
	var rs []rune
	for {
		r := lx.next()
		if !unicode.IsLetter(r) {
			lx.backup()
			break
		}
		rs = append(rs, r)
	}
	s := string(rs)
	switch s {
	case "true", "false":
		lx.emit(itemBool)
		return lx.pop()
	}
	return lx.errorf("expected value but found %q instead", s)
}

// lexCommentStart begins the lexing of a comment. It will emit
// itemCommentStart and consume no characters, passing control to lexComment.
func lexCommentStart(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemCommentStart)
	return lexComment
}

// lexComment lexes an entire comment. It assumes that '#' has been consumed.
// It will consume *up to* the first newline character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
	r := lx.peek()
	if isNL(r) || r == eof {
		lx.emit(itemText)
		return lx.pop()
	}
	lx.next()
	return lexComment
}

// lexSkip ignores all slurped input and moves on to the next state.
func lexSkip(lx *lexer, nextState stateFn) stateFn {
	return func(lx *lexer) stateFn {
		lx.ignore()
		return nextState
	}
}
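
lexSkip shows the general shape of every state in this file: a stateFn returns the next stateFn, and a driver loop keeps calling the current state until it gets nil. A minimal, runnable sketch of that driver pattern; miniLexer and the two states are made-up names for illustration, not part of this package:

package main

import "fmt"

type miniLexer struct {
	input string
	pos   int
}

type miniState func(*miniLexer) miniState

func lexWord(lx *miniLexer) miniState {
	for lx.pos < len(lx.input) && lx.input[lx.pos] != ' ' {
		lx.pos++
	}
	fmt.Println("word ends at byte", lx.pos)
	return lexSpaces
}

func lexSpaces(lx *miniLexer) miniState {
	for lx.pos < len(lx.input) && lx.input[lx.pos] == ' ' {
		lx.pos++
	}
	if lx.pos == len(lx.input) {
		return nil // a nil state terminates the driver loop
	}
	return lexWord
}

func main() {
	lx := &miniLexer{input: "ab  cd"}
	// The driver: call the current state until a state returns nil.
	for s := miniState(lexWord); s != nil; {
		s = s(lx)
	}
}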

// isWhitespace returns true if `r` is a whitespace character according
// to the spec.
func isWhitespace(r rune) bool {
	return r == '\t' || r == ' '
}

func isNL(r rune) bool {
	return r == '\n' || r == '\r'
}

func isDigit(r rune) bool {
	return r >= '0' && r <= '9'
}

func isHexadecimal(r rune) bool {
	return (r >= '0' && r <= '9') ||
		(r >= 'a' && r <= 'f') ||
		(r >= 'A' && r <= 'F')
}

func isBareKeyChar(r rune) bool {
	return (r >= 'A' && r <= 'Z') ||
		(r >= 'a' && r <= 'z') ||
		(r >= '0' && r <= '9') ||
		r == '_' ||
		r == '-'
}

func (itype itemType) String() string {
	switch itype {
	case itemError:
		return "Error"
	case itemNIL:
		return "NIL"
	case itemEOF:
		return "EOF"
	case itemText:
		return "Text"
	case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
		return "String"
	case itemBool:
		return "Bool"
	case itemInteger:
		return "Integer"
	case itemFloat:
		return "Float"
	case itemDatetime:
		return "DateTime"
	case itemTableStart:
		return "TableStart"
	case itemTableEnd:
		return "TableEnd"
	case itemKeyStart:
		return "KeyStart"
	case itemArray:
		return "Array"
	case itemArrayEnd:
		return "ArrayEnd"
	case itemCommentStart:
		return "CommentStart"
	}
	panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
}

func (item item) String() string {
	return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
}

vendor/github.com/BurntSushi/toml/parse.go (generated, vendored, new file, 592 lines)
@@ -0,0 +1,592 @@

package toml

import (
	"fmt"
	"strconv"
	"strings"
	"time"
	"unicode"
	"unicode/utf8"
)

type parser struct {
	mapping map[string]interface{}
	types   map[string]tomlType
	lx      *lexer

	// A list of keys in the order that they appear in the TOML data.
	ordered []Key

	// the full key for the current hash in scope
	context Key

	// the base key name for everything except hashes
	currentKey string

	// rough approximation of line number
	approxLine int

	// A map of 'key.group.names' to whether they were created implicitly.
	implicits map[string]bool
}

type parseError string

func (pe parseError) Error() string {
	return string(pe)
}

func parse(data string) (p *parser, err error) {
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			if err, ok = r.(parseError); ok {
				return
			}
			panic(r)
		}
	}()

	p = &parser{
		mapping:   make(map[string]interface{}),
		types:     make(map[string]tomlType),
		lx:        lex(data),
		ordered:   make([]Key, 0),
		implicits: make(map[string]bool),
	}
	for {
		item := p.next()
		if item.typ == itemEOF {
			break
		}
		p.topLevel(item)
	}

	return p, nil
}
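
parse converts panics back into ordinary errors: helpers deep in the parser panic with the parseError type, and the deferred recover in parse turns exactly that type into the function's error return while re-panicking on anything else. A self-contained sketch of the same pattern; demoError, mustPositive, and safeParse are invented for the example:

package main

import "fmt"

type demoError string

func (e demoError) Error() string { return string(e) }

func mustPositive(n int) int {
	if n <= 0 {
		panic(demoError(fmt.Sprintf("%d is not positive", n)))
	}
	return n
}

func safeParse(n int) (result int, err error) {
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			if err, ok = r.(demoError); ok {
				return // our own typed panic becomes an error return
			}
			panic(r) // unknown panics keep propagating
		}
	}()
	return mustPositive(n), nil
}

func main() {
	fmt.Println(safeParse(3))  // 3 <nil>
	fmt.Println(safeParse(-1)) // 0 -1 is not positive
}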

func (p *parser) panicf(format string, v ...interface{}) {
	msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
		p.approxLine, p.current(), fmt.Sprintf(format, v...))
	panic(parseError(msg))
}

func (p *parser) next() item {
	it := p.lx.nextItem()
	if it.typ == itemError {
		p.panicf("%s", it.val)
	}
	return it
}

func (p *parser) bug(format string, v ...interface{}) {
	panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}

func (p *parser) expect(typ itemType) item {
	it := p.next()
	p.assertEqual(typ, it.typ)
	return it
}

func (p *parser) assertEqual(expected, got itemType) {
	if expected != got {
		p.bug("Expected '%s' but got '%s'.", expected, got)
	}
}

func (p *parser) topLevel(item item) {
	switch item.typ {
	case itemCommentStart:
		p.approxLine = item.line
		p.expect(itemText)
	case itemTableStart:
		kg := p.next()
		p.approxLine = kg.line

		var key Key
		for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
			key = append(key, p.keyString(kg))
		}
		p.assertEqual(itemTableEnd, kg.typ)

		p.establishContext(key, false)
		p.setType("", tomlHash)
		p.ordered = append(p.ordered, key)
	case itemArrayTableStart:
		kg := p.next()
		p.approxLine = kg.line

		var key Key
		for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
			key = append(key, p.keyString(kg))
		}
		p.assertEqual(itemArrayTableEnd, kg.typ)

		p.establishContext(key, true)
		p.setType("", tomlArrayHash)
		p.ordered = append(p.ordered, key)
	case itemKeyStart:
		kname := p.next()
		p.approxLine = kname.line
		p.currentKey = p.keyString(kname)

		val, typ := p.value(p.next())
		p.setValue(p.currentKey, val)
		p.setType(p.currentKey, typ)
		p.ordered = append(p.ordered, p.context.add(p.currentKey))
		p.currentKey = ""
	default:
		p.bug("Unexpected type at top level: %s", item.typ)
	}
}

// Gets a string for a key (or part of a key in a table name).
func (p *parser) keyString(it item) string {
	switch it.typ {
	case itemText:
		return it.val
	case itemString, itemMultilineString,
		itemRawString, itemRawMultilineString:
		s, _ := p.value(it)
		return s.(string)
	default:
		p.bug("Unexpected key type: %s", it.typ)
		panic("unreachable")
	}
}

// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
func (p *parser) value(it item) (interface{}, tomlType) {
	switch it.typ {
	case itemString:
		return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
	case itemMultilineString:
		trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
		return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
	case itemRawString:
		return it.val, p.typeOfPrimitive(it)
	case itemRawMultilineString:
		return stripFirstNewline(it.val), p.typeOfPrimitive(it)
	case itemBool:
		switch it.val {
		case "true":
			return true, p.typeOfPrimitive(it)
		case "false":
			return false, p.typeOfPrimitive(it)
		}
		p.bug("Expected boolean value, but got '%s'.", it.val)
	case itemInteger:
		if !numUnderscoresOK(it.val) {
			p.panicf("Invalid integer %q: underscores must be surrounded by digits",
				it.val)
		}
		val := strings.Replace(it.val, "_", "", -1)
		num, err := strconv.ParseInt(val, 10, 64)
		if err != nil {
			// Distinguish integer values. Normally, it'd be a bug if the lexer
			// provides an invalid integer, but it's possible that the number is
			// out of range of valid values (which the lexer cannot determine).
			// So mark the former as a bug but the latter as a legitimate user
			// error.
			if e, ok := err.(*strconv.NumError); ok &&
				e.Err == strconv.ErrRange {

				p.panicf("Integer '%s' is out of the range of 64-bit "+
					"signed integers.", it.val)
			} else {
				p.bug("Expected integer value, but got '%s'.", it.val)
			}
		}
		return num, p.typeOfPrimitive(it)
	case itemFloat:
		parts := strings.FieldsFunc(it.val, func(r rune) bool {
			switch r {
			case '.', 'e', 'E':
				return true
			}
			return false
		})
		for _, part := range parts {
			if !numUnderscoresOK(part) {
				p.panicf("Invalid float %q: underscores must be "+
					"surrounded by digits", it.val)
			}
		}
		if !numPeriodsOK(it.val) {
			// As a special case, numbers like '123.' or '1.e2',
			// which are valid as far as Go/strconv are concerned,
			// must be rejected because TOML says that a fractional
			// part consists of '.' followed by 1+ digits.
			p.panicf("Invalid float %q: '.' must be followed "+
				"by one or more digits", it.val)
		}
		val := strings.Replace(it.val, "_", "", -1)
		num, err := strconv.ParseFloat(val, 64)
		if err != nil {
			if e, ok := err.(*strconv.NumError); ok &&
				e.Err == strconv.ErrRange {

				p.panicf("Float '%s' is out of the range of 64-bit "+
					"IEEE-754 floating-point numbers.", it.val)
			} else {
				p.panicf("Invalid float value: %q", it.val)
			}
		}
		return num, p.typeOfPrimitive(it)
	case itemDatetime:
		var t time.Time
		var ok bool
		var err error
		for _, format := range []string{
			"2006-01-02T15:04:05Z07:00",
			"2006-01-02T15:04:05",
			"2006-01-02",
		} {
			t, err = time.ParseInLocation(format, it.val, time.Local)
			if err == nil {
				ok = true
				break
			}
		}
		if !ok {
			p.panicf("Invalid TOML Datetime: %q.", it.val)
		}
		return t, p.typeOfPrimitive(it)
	case itemArray:
		array := make([]interface{}, 0)
		types := make([]tomlType, 0)

		for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
			if it.typ == itemCommentStart {
				p.expect(itemText)
				continue
			}

			val, typ := p.value(it)
			array = append(array, val)
			types = append(types, typ)
		}
		return array, p.typeOfArray(types)
	case itemInlineTableStart:
		var (
			hash         = make(map[string]interface{})
			outerContext = p.context
			outerKey     = p.currentKey
		)

		p.context = append(p.context, p.currentKey)
		p.currentKey = ""
		for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
			// Skip comments before asserting a key; checking for a key
			// first would make the comment branch unreachable.
			if it.typ == itemCommentStart {
				p.expect(itemText)
				continue
			}
			if it.typ != itemKeyStart {
				p.bug("Expected key start but instead found %q, around line %d",
					it.val, p.approxLine)
			}

			// retrieve key
			k := p.next()
			p.approxLine = k.line
			kname := p.keyString(k)

			// retrieve value
			p.currentKey = kname
			val, typ := p.value(p.next())
			// make sure we keep metadata up to date
			p.setType(kname, typ)
			p.ordered = append(p.ordered, p.context.add(p.currentKey))
			hash[kname] = val
		}
		p.context = outerContext
		p.currentKey = outerKey
		return hash, tomlHash
	}
	p.bug("Unexpected value type: %s", it.typ)
	panic("unreachable")
}
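
The itemDatetime case above tries each accepted layout in turn until one parses. A standalone illustration of that try-each-layout loop with time.ParseInLocation; the inputs are chosen arbitrarily:

package main

import (
	"fmt"
	"time"
)

func main() {
	layouts := []string{
		"2006-01-02T15:04:05Z07:00",
		"2006-01-02T15:04:05",
		"2006-01-02",
	}
	for _, in := range []string{"1979-05-27T07:32:00Z", "1979-05-27"} {
		for _, layout := range layouts {
			// The first layout that parses wins; later ones are not tried.
			if t, err := time.ParseInLocation(layout, in, time.Local); err == nil {
				fmt.Println(in, "->", t.Format(time.RFC3339))
				break
			}
		}
	}
}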

// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores.
func numUnderscoresOK(s string) bool {
	accept := false
	for _, r := range s {
		if r == '_' {
			if !accept {
				return false
			}
			accept = false
			continue
		}
		accept = true
	}
	return accept
}

// numPeriodsOK checks whether every period in s is followed by a digit.
func numPeriodsOK(s string) bool {
	period := false
	for _, r := range s {
		if period && !isDigit(r) {
			return false
		}
		period = r == '.'
	}
	return !period
}
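
For a feel of what numUnderscoresOK accepts, here is a standalone copy of the same loop run over a few spellings; only the digit-separated form passes:

package main

import "fmt"

func numUnderscoresOK(s string) bool {
	accept := false
	for _, r := range s {
		if r == '_' {
			if !accept {
				return false // leading or doubled underscore
			}
			accept = false
			continue
		}
		accept = true
	}
	return accept // false here means a trailing underscore
}

func main() {
	for _, s := range []string{"1_000", "_1000", "1000_", "1__0"} {
		fmt.Printf("%-7q underscores ok: %v\n", s, numUnderscoresOK(s))
	}
	// "1_000" true; "_1000" false; "1000_" false; "1__0" false
}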

// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) {
	var ok bool

	// Always start at the top level and drill down for our context.
	hashContext := p.mapping
	keyContext := make(Key, 0)

	// We only need implicit hashes for key[0:-1]
	for _, k := range key[0 : len(key)-1] {
		_, ok = hashContext[k]
		keyContext = append(keyContext, k)

		// No key? Make an implicit hash and move on.
		if !ok {
			p.addImplicit(keyContext)
			hashContext[k] = make(map[string]interface{})
		}

		// If the hash context is actually an array of tables, then set
		// the hash context to the last element in that array.
		//
		// Otherwise, it better be a table, since this MUST be a key group (by
		// virtue of it not being the last element in a key).
		switch t := hashContext[k].(type) {
		case []map[string]interface{}:
			hashContext = t[len(t)-1]
		case map[string]interface{}:
			hashContext = t
		default:
			p.panicf("Key '%s' was already created as a hash.", keyContext)
		}
	}

	p.context = keyContext
	if array {
		// If this is the first element for this array, then allocate a new
		// list of tables for it.
		k := key[len(key)-1]
		if _, ok := hashContext[k]; !ok {
			hashContext[k] = make([]map[string]interface{}, 0, 5)
		}

		// Add a new table. But make sure the key hasn't already been used
		// for something else.
		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
			hashContext[k] = append(hash, make(map[string]interface{}))
		} else {
			p.panicf("Key '%s' was already created and cannot be used as "+
				"an array.", keyContext)
		}
	} else {
		p.setValue(key[len(key)-1], make(map[string]interface{}))
	}
	p.context = append(p.context, key[len(key)-1])
}
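
The walk in establishContext amounts to drilling through nested maps, creating each missing level as an implicit table. A rough standalone sketch of just that drilling step, without the array-of-tables handling; drill and the keys are hypothetical names for the example:

package main

import "fmt"

func drill(root map[string]interface{}, key []string) map[string]interface{} {
	ctx := root
	for _, k := range key {
		next, ok := ctx[k]
		if !ok {
			m := make(map[string]interface{})
			ctx[k] = m // implicit table, created on demand
			ctx = m
			continue
		}
		m, ok := next.(map[string]interface{})
		if !ok {
			panic(fmt.Sprintf("key %q already holds a non-table value", k))
		}
		ctx = m
	}
	return ctx
}

func main() {
	root := map[string]interface{}{}
	drill(root, []string{"a", "b", "c"})["x"] = 1
	fmt.Println(root) // map[a:map[b:map[c:map[x:1]]]]
}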

// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, accounting for
// implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
	var tmpHash interface{}
	var ok bool

	hash := p.mapping
	keyContext := make(Key, 0)
	for _, k := range p.context {
		keyContext = append(keyContext, k)
		if tmpHash, ok = hash[k]; !ok {
			p.bug("Context for key '%s' has not been established.", keyContext)
		}
		switch t := tmpHash.(type) {
		case []map[string]interface{}:
			// The context is a table of hashes. Pick the most recent table
			// defined as the current hash.
			hash = t[len(t)-1]
		case map[string]interface{}:
			hash = t
		default:
			p.bug("Expected hash to have type 'map[string]interface{}', but "+
				"it has '%T' instead.", tmpHash)
		}
	}
	keyContext = append(keyContext, key)

	if _, ok := hash[key]; ok {
		// Typically, if the given key has already been set, then we have
		// to raise an error since duplicate keys are disallowed. However,
		// it's possible that a key was previously defined implicitly. In this
		// case, it is allowed to be redefined concretely. (See the
		// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
		//
		// But we have to make sure to stop marking it as implicit. (So that
		// another redefinition provokes an error.)
		//
		// Note that since it has already been defined (as a hash), we don't
		// want to overwrite it. So our business is done.
		if p.isImplicit(keyContext) {
			p.removeImplicit(keyContext)
			return
		}

		// Otherwise, we have a concrete key trying to override a previous
		// key, which is *always* wrong.
		p.panicf("Key '%s' has already been defined.", keyContext)
	}
	hash[key] = value
}

// setType sets the type of a particular value at a given key.
// It should be called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) {
	keyContext := make(Key, 0, len(p.context)+1)
	for _, k := range p.context {
		keyContext = append(keyContext, k)
	}
	if len(key) > 0 { // allow type setting for hashes
		keyContext = append(keyContext, key)
	}
	p.types[keyContext.String()] = typ
}

// addImplicit sets the given Key as having been created implicitly.
func (p *parser) addImplicit(key Key) {
	p.implicits[key.String()] = true
}

// removeImplicit stops tagging the given key as having been implicitly
// created.
func (p *parser) removeImplicit(key Key) {
	p.implicits[key.String()] = false
}

// isImplicit returns true if the key group pointed to by the key was created
// implicitly.
func (p *parser) isImplicit(key Key) bool {
	return p.implicits[key.String()]
}

// current returns the full key name of the current context.
func (p *parser) current() string {
	if len(p.currentKey) == 0 {
		return p.context.String()
	}
	if len(p.context) == 0 {
		return p.currentKey
	}
	return fmt.Sprintf("%s.%s", p.context, p.currentKey)
}

func stripFirstNewline(s string) string {
	if len(s) == 0 || s[0] != '\n' {
		return s
	}
	return s[1:]
}

func stripEscapedWhitespace(s string) string {
	esc := strings.Split(s, "\\\n")
	if len(esc) > 1 {
		for i := 1; i < len(esc); i++ {
			esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
		}
	}
	return strings.Join(esc, "")
}
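
To make the multiline behavior concrete: a line-ending backslash removes the newline and all leading whitespace on the following line. A standalone run of the same logic; the sample string is arbitrary:

package main

import (
	"fmt"
	"strings"
	"unicode"
)

func stripEscapedWhitespace(s string) string {
	esc := strings.Split(s, "\\\n")
	if len(esc) > 1 {
		for i := 1; i < len(esc); i++ {
			esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
		}
	}
	return strings.Join(esc, "")
}

func main() {
	// A multiline basic string body with a line-ending backslash.
	body := "The quick brown \\\n    fox jumps."
	fmt.Printf("%q\n", stripEscapedWhitespace(body))
	// "The quick brown fox jumps."
}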

func (p *parser) replaceEscapes(str string) string {
	var replaced []rune
	s := []byte(str)
	r := 0
	for r < len(s) {
		if s[r] != '\\' {
			c, size := utf8.DecodeRune(s[r:])
			r += size
			replaced = append(replaced, c)
			continue
		}
		r += 1
		if r >= len(s) {
			p.bug("Escape sequence at end of string.")
			return ""
		}
		switch s[r] {
		default:
			p.bug("Expected valid escape code after \\, but got %q.", s[r])
			return ""
		case 'b':
			replaced = append(replaced, rune(0x0008))
			r += 1
		case 't':
			replaced = append(replaced, rune(0x0009))
			r += 1
		case 'n':
			replaced = append(replaced, rune(0x000A))
			r += 1
		case 'f':
			replaced = append(replaced, rune(0x000C))
			r += 1
		case 'r':
			replaced = append(replaced, rune(0x000D))
			r += 1
		case '"':
			replaced = append(replaced, rune(0x0022))
			r += 1
		case '\\':
			replaced = append(replaced, rune(0x005C))
			r += 1
		case 'u':
			// At this point, we know we have a Unicode escape of the form
			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
			// for us.)
			escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
			replaced = append(replaced, escaped)
			r += 5
		case 'U':
			// At this point, we know we have a Unicode escape of the form
			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
			// for us.)
			escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
			replaced = append(replaced, escaped)
			r += 9
		}
	}
	return string(replaced)
}

func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
	s := string(bs)
	hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
	if err != nil {
		p.bug("Could not parse '%s' as a hexadecimal number, but the "+
			"lexer claims it's OK: %s", s, err)
	}
	if !utf8.ValidRune(rune(hex)) {
		p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
	}
	return rune(hex)
}

func isStringType(ty itemType) bool {
	return ty == itemString || ty == itemMultilineString ||
		ty == itemRawString || ty == itemRawMultilineString
}
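
asciiEscapeToUnicode boils down to strconv.ParseUint in base 16 plus a rune-validity check. The same steps as a standalone helper; decodeHexEscape is an invented name for the sketch:

package main

import (
	"fmt"
	"strconv"
	"unicode/utf8"
)

func decodeHexEscape(digits string) (rune, error) {
	hex, err := strconv.ParseUint(digits, 16, 32)
	if err != nil {
		return 0, err
	}
	// Reject surrogates and values above U+10FFFF.
	if !utf8.ValidRune(rune(hex)) {
		return 0, fmt.Errorf("U+%s is not a valid code point", digits)
	}
	return rune(hex), nil
}

func main() {
	r, _ := decodeHexEscape("00e9")
	fmt.Printf("%c\n", r) // é
}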

vendor/github.com/BurntSushi/toml/type_check.go (generated, vendored, new file, 91 lines)
@@ -0,0 +1,91 @@

package toml

// tomlType represents any Go type that corresponds to a TOML type.
// While the first draft of the TOML spec has a simplistic type system that
// probably doesn't need this level of sophistication, we seem to be moving
// toward adding real composite types.
type tomlType interface {
	typeString() string
}

// typeEqual accepts any two types and returns true if they are equal.
func typeEqual(t1, t2 tomlType) bool {
	if t1 == nil || t2 == nil {
		return false
	}
	return t1.typeString() == t2.typeString()
}

func typeIsHash(t tomlType) bool {
	return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}

type tomlBaseType string

func (btype tomlBaseType) typeString() string {
	return string(btype)
}

func (btype tomlBaseType) String() string {
	return btype.typeString()
}

var (
	tomlInteger   tomlBaseType = "Integer"
	tomlFloat     tomlBaseType = "Float"
	tomlDatetime  tomlBaseType = "Datetime"
	tomlString    tomlBaseType = "String"
	tomlBool      tomlBaseType = "Bool"
	tomlArray     tomlBaseType = "Array"
	tomlHash      tomlBaseType = "Hash"
	tomlArrayHash tomlBaseType = "ArrayHash"
)

// typeOfPrimitive returns a tomlType of any primitive value in TOML.
// Primitive values are: Integer, Float, Datetime, String and Bool.
//
// Passing a lexer item other than the following will cause a BUG message
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
	switch lexItem.typ {
	case itemInteger:
		return tomlInteger
	case itemFloat:
		return tomlFloat
	case itemDatetime:
		return tomlDatetime
	case itemString, itemMultilineString, itemRawString, itemRawMultilineString:
		return tomlString
	case itemBool:
		return tomlBool
	}
	p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
	panic("unreachable")
}

// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
	// Empty arrays are cool.
	if len(types) == 0 {
		return tomlArray
	}

	theType := types[0]
	for _, t := range types[1:] {
		if !typeEqual(theType, t) {
			p.panicf("Array contains values of type '%s' and '%s', but "+
				"arrays must be homogeneous.", theType, t)
		}
	}
	return tomlArray
}

vendor/github.com/BurntSushi/toml/type_fields.go (generated, vendored, new file, 242 lines)
@@ -0,0 +1,242 @@

package toml

// Struct field handling is adapted from code in encoding/json:
//
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the Go distribution.

import (
	"reflect"
	"sort"
	"sync"
)

// A field represents a single field found in a struct.
type field struct {
	name  string       // the name of the field (`toml` tag included)
	tag   bool         // whether field has a `toml` tag
	index []int        // represents the depth of an anonymous field
	typ   reflect.Type // the type of the field
}

// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from toml tag", then
// breaking ties with index sequence.
type byName []field

func (x byName) Len() int { return len(x) }

func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byName) Less(i, j int) bool {
	if x[i].name != x[j].name {
		return x[i].name < x[j].name
	}
	if len(x[i].index) != len(x[j].index) {
		return len(x[i].index) < len(x[j].index)
	}
	if x[i].tag != x[j].tag {
		return x[i].tag
	}
	return byIndex(x).Less(i, j)
}

// byIndex sorts field by index sequence.
type byIndex []field

func (x byIndex) Len() int { return len(x) }

func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byIndex) Less(i, j int) bool {
	for k, xik := range x[i].index {
		if k >= len(x[j].index) {
			return false
		}
		if xik != x[j].index[k] {
			return xik < x[j].index[k]
		}
	}
	return len(x[i].index) < len(x[j].index)
}

// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include: the top struct and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}

	// Count of queued names for current level and the next.
	count := map[reflect.Type]int{}
	nextCount := map[reflect.Type]int{}

	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}

	// Fields found.
	var fields []field

	for len(next) > 0 {
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}

		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true

			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.PkgPath != "" && !sf.Anonymous { // unexported
					continue
				}
				opts := getOptions(sf.Tag)
				if opts.skip {
					continue
				}
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i

				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}

				// Record found field and index sequence.
				if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := opts.name != ""
					name := opts.name
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, field{name, tagged, index, ft})
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}

				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					f := field{name: ft.Name(), index: index, typ: ft}
					next = append(next, f)
				}
			}
		}
	}

	sort.Sort(byName(fields))

	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with TOML tags are promoted.

	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}

	fields = out
	sort.Sort(byIndex(fields))

	return fields
}
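
A much-reduced standalone sketch of the breadth-first walk typeFields performs: follow embedded struct fields level by level and collect the visible names. Tag options, index tracking, and the shadowing pass are all omitted here; Inner and Outer are example types invented for the sketch:

package main

import (
	"fmt"
	"reflect"
)

type Inner struct{ C int }

type Outer struct {
	A int
	B string
	Inner
}

func fieldNames(t reflect.Type) []string {
	var names []string
	queue := []reflect.Type{t}
	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]
		for i := 0; i < cur.NumField(); i++ {
			sf := cur.Field(i)
			if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
				// Explore embedded structs in a later round, as in BFS.
				// NOTE: a real version needs a visited set to survive
				// recursive embedding.
				queue = append(queue, sf.Type)
				continue
			}
			names = append(names, sf.Name)
		}
	}
	return names
}

func main() {
	fmt.Println(fieldNames(reflect.TypeOf(Outer{}))) // [A B C]
}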

// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// TOML tags. If there are multiple top-level fields, the boolean
// will be false: this condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
	// The fields are sorted in increasing index-length order. The winner
	// must therefore be one with the shortest index length. Drop all
	// longer entries, which is easy: just truncate the slice.
	length := len(fields[0].index)
	tagged := -1 // Index of first tagged field.
	for i, f := range fields {
		if len(f.index) > length {
			fields = fields[:i]
			break
		}
		if f.tag {
			if tagged >= 0 {
				// Multiple tagged fields at the same level: conflict.
				// Return no field.
				return field{}, false
			}
			tagged = i
		}
	}
	if tagged >= 0 {
		return fields[tagged], true
	}
	// All remaining fields have the same length. If there's more than one,
	// we have a conflict (two fields named "X" at the same level) and we
	// return no field.
	if len(fields) > 1 {
		return field{}, false
	}
	return fields[0], true
}

var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}

// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
	fieldCache.RLock()
	f := fieldCache.m[t]
	fieldCache.RUnlock()
	if f != nil {
		return f
	}

	// Compute fields without lock.
	// Might duplicate effort but won't hold other computations back.
	f = typeFields(t)
	if f == nil {
		f = []field{}
	}

	fieldCache.Lock()
	if fieldCache.m == nil {
		fieldCache.m = map[reflect.Type][]field{}
	}
	fieldCache.m[t] = f
	fieldCache.Unlock()
	return f
}
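
cachedTypeFields uses the classic read-mostly cache shape: take the read lock for the fast path, compute outside any lock (racing computations just duplicate work), then publish under the write lock. The same shape in a tiny standalone form; cachedUpper is an invented stand-in for the real computation:

package main

import (
	"fmt"
	"strings"
	"sync"
)

var cache struct {
	sync.RWMutex
	m map[string]string
}

func cachedUpper(s string) string {
	cache.RLock()
	v, ok := cache.m[s]
	cache.RUnlock()
	if ok {
		return v
	}
	v = strings.ToUpper(s) // compute without holding the lock
	cache.Lock()
	if cache.m == nil {
		cache.m = map[string]string{}
	}
	cache.m[s] = v
	cache.Unlock()
	return v
}

func main() {
	fmt.Println(cachedUpper("toml"), cachedUpper("toml")) // second call hits the cache
}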