Mirror of https://github.com/containers/skopeo.git, synced 2026-01-30 05:49:52 +00:00
Compare commits
1631 Commits

88 .cirrus.yml Normal file
@@ -0,0 +1,88 @@
---

# Main collection of env. vars to set for all tasks and scripts.
env:
    ####
    #### Global variables used for all tasks
    ####
    # Name of the ultimate destination branch for this CI run, PR or post-merge.
    DEST_BRANCH: "master"
    # Overrides default location (/tmp/cirrus) for repo clone
    GOPATH: &gopath "/var/tmp/go"
    GOBIN: "${GOPATH}/bin"
    GOCACHE: "${GOPATH}/cache"
    GOSRC: &gosrc "/var/tmp/go/src/github.com/containers/podman"
    CIRRUS_WORKING_DIR: *gosrc
    # The default is 'sh' if unspecified
    CIRRUS_SHELL: "/bin/bash"

    ####
    #### Cache-image names to test with (double-quotes around names are critical)
    ####
    FEDORA_NAME: "fedora-34beta"
    PRIOR_FEDORA_NAME: "fedora-33"
    UBUNTU_NAME: "ubuntu-2010"
    PRIOR_UBUNTU_NAME: "ubuntu-2004"

    # Google-cloud VM Images
    IMAGE_SUFFIX: "c5032481331085312"
    FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
    PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
    UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
    PRIOR_UBUNTU_CACHE_IMAGE_NAME: "prior-ubuntu-${IMAGE_SUFFIX}"

    # Container FQIN's
    FEDORA_CONTAINER_FQIN: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}"
    PRIOR_FEDORA_CONTAINER_FQIN: "quay.io/libpod/prior-fedora_podman:${IMAGE_SUFFIX}"
    UBUNTU_CONTAINER_FQIN: "quay.io/libpod/ubuntu_podman:${IMAGE_SUFFIX}"
    PRIOR_UBUNTU_CONTAINER_FQIN: "quay.io/libpod/prior-ubuntu_podman:${IMAGE_SUFFIX}"


# Default timeout for each task
timeout_in: 30m


gcp_credentials: ENCRYPTED[52d9e807b531b37ab14e958cb5a72499460663f04c8d73e22ad608c027a31118420f1c80f0be0882fbdf96f49d8f9ac0]


# This task is critical. It updates the "last-used by" timestamp stored
# in metadata for all VM images. This mechanism functions in tandem with
# an out-of-band pruning operation to remove disused VM images.
meta_task:
    name: "VM img. keepalive"
    alias: meta
    container: &smallcontainer
        cpu: 2
        memory: 2
        image: quay.io/libpod/imgts:$IMAGE_SUFFIX
    env:
        # Space-separated list of images used by this repository state
        IMGNAMES: >-
            ${FEDORA_CACHE_IMAGE_NAME}
            ${PRIOR_FEDORA_CACHE_IMAGE_NAME}
            ${UBUNTU_CACHE_IMAGE_NAME}
            ${PRIOR_UBUNTU_CACHE_IMAGE_NAME}
        BUILDID: "${CIRRUS_BUILD_ID}"
        REPOREF: "${CIRRUS_REPO_NAME}"
        GCPJSON: ENCRYPTED[6867b5a83e960e7c159a98fe6c8360064567a071c6f4b5e7d532283ecd870aa65c94ccd74bdaa9bf7aadac9d42e20a67]
        GCPNAME: ENCRYPTED[1cf558ae125e3c39ec401e443ad76452b25d790c45eb73d77c83eb059a0f7fd5085ef7e2f7e410b04ea6e83b0aab2eb1]
        GCPPROJECT: libpod-218412
    clone_script: &noop mkdir -p "$CIRRUS_WORKING_DIR"
    script: /usr/local/bin/entrypoint.sh


# Status aggregator for all tests. This task simply ensures a defined
# set of tasks all passed, and allows confirming that based on the status
# of this task.
success_task:
    name: "Total Success"
    alias: success
    # N/B: ALL tasks must be listed here, minus their '_task' suffix.
    depends_on:
        - meta
    container: *smallcontainer
    env:
        CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
        TEST_ENVIRON: container
    clone_script: *noop
    script: /bin/true

25 .github/workflow/stale.yml vendored Normal file
@@ -0,0 +1,25 @@
name: Mark stale issues and pull requests

# Please refer to https://github.com/actions/stale/blob/master/action.yml
# to see all config knobs of the stale action.

on:
  schedule:
  - cron: "0 0 * * *"

jobs:
  stale:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/stale@v1
      with:
        repo-token: ${{ secrets.GITHUB_TOKEN }}
        stale-issue-message: 'A friendly reminder that this issue had no activity for 30 days.'
        stale-pr-message: 'A friendly reminder that this PR had no activity for 30 days.'
        stale-issue-label: 'stale-issue'
        stale-pr-label: 'stale-pr'
        days-before-stale: 30
        days-before-close: 365
        remove-stale-when-updated: true

9 .gitignore vendored
@@ -1,2 +1,7 @@
skopeo
skopeo.1
*.1
/layers-*
/skopeo
result

# ignore JetBrains IDEs (GoLand) config folder
.idea

120 .travis.yml
@@ -1,19 +1,101 @@
---
language: go
sudo: false
notifications:
  email: false
env:
  global:
  - GO15VENDOREXPERIMENT=1
go:
- 1.5
- tip
install:
- go get github.com/golang/lint/golint
script:
- go vet .
- test -z "$(golint . | tee /dev/stderr)"
- test -z "$(gofmt -s -l . | tee /dev/stderr)"
- go build -o skopeo .
- go test -v .
language: go

go:
  - 1.15.x

notifications:
  email: false

env:
  global:
    # Multiarch manifest will support architectures from this list. It should be the same architectures, as ones in image-build-push step in this Travis config
    - MULTIARCH_MANIFEST_ARCHITECTURES="amd64 s390x ppc64le"
    # env variables for stable image build
    - STABLE_IMAGE=quay.io/skopeo/stable:v1.2.0
    - EXTRA_STABLE_IMAGE=quay.io/containers/skopeo:v1.2.0
    # env variable for upstream image build
    - UPSTREAM_IMAGE=quay.io/skopeo/upstream:master

# Just declaration of the image-build-push step with script actions to execute
x_base_steps:
  - &image-build-push
    services:
      - docker
    os: linux
    dist: focal
    script:
      # skopeo upstream image build
      - make -f release/Makefile build-image/upstream
      # Push image in case if build is started via cron job or code is pushed to master
      - if [ "$TRAVIS_EVENT_TYPE" == "push" ] || [ "$TRAVIS_EVENT_TYPE" == "cron" ]; then
          make -f release/Makefile push-image/upstream ;
        fi

      # skopeo stable image build
      - make -f release/Makefile build-image/stable
      # Push image in case if build is started via cron job or code is pushed to master
      - if [ "$TRAVIS_EVENT_TYPE" == "push" ] || [ "$TRAVIS_EVENT_TYPE" == "cron" ]; then
          make -f release/Makefile push-image/stable ;
        fi

# Just declaration of stage order to run
stages:
  # Test for local build
  - local-build
  # Build and push image for 1 architecture
  - name: image-build-push
    if: branch = master
  # Create and push image manifest to have multiarch image on top of architecture specific images
  - name: manifest-multiarch-push
    if: (type IN (push, cron)) AND branch = master

# Actual execution of steps
jobs:
  include:
    # Run 2 local-build steps in parallel for osx and linux/amd64 platforms
    - stage: local-build
      <<: *local-build
      name: local build for osx
      os: osx
      osx_image: xcode11.3
      install:
        # Ideally, the (brew update) should not be necessary and Travis would have fairly
        # frequently updated OS images; that's not been the case historically.
        # In particular, explicitly unlink python@2, which has been removed from Homebrew
        # since the last OS image build (as of July 2020), but the Travis OS still
        # contains it, and it prevents updating of Python 3.
        - brew update && brew unlink python@2 && brew install gpgme
      script:
        - hack/travis_osx.sh
    - stage: local-build
      <<: *local-build
      name: local build for linux
      os: linux
      services:
        - docker
      script:
        - make vendor && ./hack/tree_status.sh && make local-cross && make check

    # Run 3 image-build-push tasks in parallel for linux/amd64, linux/s390x and linux/ppc64le platforms (for upstream and stable)
    - stage: image-build-push
      <<: *image-build-push
      name: images for amd64
      arch: amd64

    - stage: image-build-push
      <<: *image-build-push
      name: images for s390x
      arch: s390x

    - stage: image-build-push
      <<: *image-build-push
      name: images for ppc64le
      arch: ppc64le

    # Run final task to generate multi-arch image manifests (for upstream and stable)
    - stage: manifest-multiarch-push
      os: linux
      dist: focal
      script:
        - make -f release/Makefile push-manifest-multiarch/upstream
        - make -f release/Makefile push-manifest-multiarch/stable

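The multi-arch stages above are driven entirely by `release/Makefile` targets. As a quick, hypothetical sanity check (not part of the CI config itself), the published manifest list can be inspected with skopeo; the image reference is just the `STABLE_IMAGE` value from the config, and the `jq` filter is only an assumption for readability:

```sh
# Print the architectures covered by the pushed manifest list; they should
# match MULTIARCH_MANIFEST_ARCHITECTURES ("amd64 s390x ppc64le").
skopeo inspect --raw docker://quay.io/skopeo/stable:v1.2.0 \
  | jq -r '.manifests[].platform.architecture'
```
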
3 CODE-OF-CONDUCT.md Normal file
@@ -0,0 +1,3 @@
## The skopeo Project Community Code of Conduct

The skopeo project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md).

173 CONTRIBUTING.md Normal file
@@ -0,0 +1,173 @@
# Contributing to Skopeo

We'd love to have you join the community! Below summarizes the processes
that we follow.

## Topics

* [Reporting Issues](#reporting-issues)
* [Submitting Pull Requests](#submitting-pull-requests)
* [Communications](#communications)
<!--
* [Becoming a Maintainer](#becoming-a-maintainer)
-->

## Reporting Issues

Before reporting an issue, check our backlog of
[open issues](https://github.com/containers/skopeo/issues)
to see if someone else has already reported it. If so, feel free to add
your scenario, or additional information, to the discussion. Or simply
"subscribe" to it to be notified when it is updated.

If you find a new issue with the project we'd love to hear about it! The most
important aspect of a bug report is that it includes enough information for
us to reproduce it. So, please include as much detail as possible and try
to remove the extra stuff that doesn't really relate to the issue itself.
The easier it is for us to reproduce it, the faster it'll be fixed!

Please don't include any private/sensitive information in your issue!

## Submitting Pull Requests

No Pull Request (PR) is too small! Typos, additional comments in the code,
new testcases, bug fixes, new features, more documentation, ... it's all
welcome!

While bug fixes can first be identified via an "issue", that is not required.
It's ok to just open up a PR with the fix, but make sure you include the same
information you would have included in an issue - like how to reproduce it.

PRs for new features should include some background on what use cases the
new code is trying to address. When possible and when it makes sense, try to break-up
larger PRs into smaller ones - it's easier to review smaller
code changes. But only if those smaller ones make sense as stand-alone PRs.

Regardless of the type of PR, all PRs should include:
* well documented code changes
* additional testcases. Ideally, they should fail w/o your code change applied
* documentation changes

Squash your commits into logical pieces of work that might want to be reviewed
separate from the rest of the PRs. Ideally, each commit should implement a single
idea, and the PR branch should pass the tests at every commit. GitHub makes it easy
to review the cumulative effect of many commits; so, when in doubt, use smaller commits.

PRs that fix issues should include a reference like `Closes #XXXX` in the
commit message so that github will automatically close the referenced issue
when the PR is merged.

<!--
All PRs require at least two LGTMs (Looks Good To Me) from maintainers.
-->

### Sign your PRs

The sign-off is a line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

Then you just add a line to every git commit message:

    Signed-off-by: Joe Smith <joe.smith@email.com>

Use your real name (sorry, no pseudonyms or anonymous contributions.)

If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.

### Dependencies management

Make sure [`vndr`](https://github.com/LK4D4/vndr) is installed.

In order to add a new dependency to this project:

- add a new line to `vendor.conf` according to `vndr` rules (e.g. `github.com/pkg/errors master`)
- run `make vendor`

In order to update an existing dependency:

- update the relevant dependency line in `vendor.conf`
- run `make vendor`

When new PRs for [containers/image](https://github.com/containers/image) break `skopeo` (i.e. `containers/image` tests fail in `make test-skopeo`):

- create out a new branch in your `skopeo` checkout and switch to it
- update `vendor.conf`. Find out the `containers/image` dependency; update it to vendor from your own branch and your own repository fork (e.g. `github.com/containers/image my-branch https://github.com/runcom/image`)
- run `make vendor`
- make any other necessary changes in the skopeo repo (e.g. add other dependencies now required by `containers/image`, or update skopeo for changed `containers/image` API)
- optionally add new integration tests to the skopeo repo
- submit the resulting branch as a skopeo PR, marked “DO NOT MERGE”
- iterate until tests pass and the PR is reviewed
- then the original `containers/image` PR can be merged, disregarding its `make test-skopeo` failure
- as soon as possible after that, in the skopeo PR, restore the `containers/image` line in `vendor.conf` to use `containers/image:master`
- run `make vendor`
- update the skopeo PR with the result, drop the “DO NOT MERGE” marking
- after tests complete successfully again, merge the skopeo PR

## Communications

For general questions, or discussions, please use the
IRC group on `irc.freenode.net` called `container-projects`
that has been setup.

For discussions around issues/bugs and features, you can use the github
[issues](https://github.com/containers/skopeo/issues)
and
[PRs](https://github.com/containers/skopeo/pulls)
tracking system.

<!--
## Becoming a Maintainer

To become a maintainer you must first be nominated by an existing maintainer.
If a majority (>50%) of maintainers agree then the proposal is adopted and
you will be added to the list.

Removing a maintainer requires at least 75% of the remaining maintainers
approval, or if the person requests to be removed then it is automatic.
Normally, a maintainer will only be removed if they are considered to be
inactive for a long period of time or are viewed as disruptive to the community.

The current list of maintainers can be found in the
[MAINTAINERS](MAINTAINERS) file.
-->

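As a concrete illustration of the vendoring workflow described in CONTRIBUTING.md above, the whole add-a-dependency cycle is only a few commands; the dependency name here is just the `github.com/pkg/errors` example from the text, not a real change:

```sh
# Add (or bump) a dependency line per vndr rules, re-vendor, and commit with a
# DCO sign-off as required by the "Sign your PRs" section.
echo 'github.com/pkg/errors master' >> vendor.conf
make vendor
git commit -s -a -m 'vendor: add github.com/pkg/errors'
```
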
54 Dockerfile Normal file
@@ -0,0 +1,54 @@
FROM fedora

RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-md2man \
    # storage deps
    btrfs-progs-devel \
    device-mapper-devel \
    # gpgme bindings deps
    libassuan-devel gpgme-devel \
    gnupg \
    # htpasswd for system tests
    httpd-tools \
    # OpenShift deps
    which tar wget hostname util-linux bsdtar socat ethtool device-mapper iptables tree findutils nmap-ncat e2fsprogs xfsprogs lsof docker iproute \
    bats jq podman runc \
    golint \
    openssl \
    && dnf clean all

# Install two versions of the registry. The first is an older version that
# only supports schema1 manifests. The second is a newer version that supports
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
RUN set -x \
    && REGISTRY_COMMIT_SCHEMA1=ec87e9b6971d831f0eff752ddb54fb64693e51cd \
    && REGISTRY_COMMIT=47a064d4195a9b56133891bbb13620c3ac83a827 \
    && export GOPATH="$(mktemp -d)" \
    && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
    && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
    && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
        go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
    && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
    && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
        go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
    && rm -rf "$GOPATH"

RUN set -x \
    && export GOPATH=$(mktemp -d) \
    && git clone --depth 1 -b v1.5.0-alpha.3 git://github.com/openshift/origin "$GOPATH/src/github.com/openshift/origin" \
    # The sed edits out a "go < 1.5" check which works incorrectly with go ≥ 1.10. \
    && sed -i -e 's/\[\[ "\${go_version\[2]}" < "go1.5" ]]/false/' "$GOPATH/src/github.com/openshift/origin/hack/common.sh" \
    && (cd "$GOPATH/src/github.com/openshift/origin" && make clean build && make all WHAT=cmd/dockerregistry) \
    && cp -a "$GOPATH/src/github.com/openshift/origin/_output/local/bin/linux"/*/* /usr/local/bin \
    && cp "$GOPATH/src/github.com/openshift/origin/images/dockerregistry/config.yml" /atomic-registry-config.yml \
    && rm -rf "$GOPATH" \
    && mkdir /registry

ENV GOPATH /usr/share/gocode:/go
ENV PATH $GOPATH/bin:/usr/share/gocode/bin:$PATH
ENV container_magic 85531765-346b-4316-bdb8-358e4cca9e5d
RUN go version
WORKDIR /go/src/github.com/containers/skopeo
COPY . /go/src/github.com/containers/skopeo

#ENTRYPOINT ["hack/dind"]

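The Dockerfile above only defines the test environment. A hedged sketch of how it might be used locally follows; the image tag and the interactive shell are assumptions, not something the file itself mandates:

```sh
# Build the test image from the repository root (the Dockerfile COPYs the whole
# tree into /go/src/github.com/containers/skopeo) and start a shell inside it.
docker build -t skopeo-dev .
docker run --rm -it skopeo-dev bash
```
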
12 Dockerfile.build Normal file
@@ -0,0 +1,12 @@
FROM registry.fedoraproject.org/fedora:33

RUN dnf update -y && \
    dnf install -y \
    btrfs-progs-devel \
    device-mapper-devel \
    golang \
    gpgme-devel \
    make

ENV GOPATH=/
WORKDIR /src/github.com/containers/skopeo

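Dockerfile.build, by contrast, only installs build dependencies and sets the working directory. A possible invocation is sketched below, assuming the source tree is bind-mounted over the WORKDIR and that the repository Makefile's default target builds the binary (both assumptions, not stated in the file):

```sh
# Build the builder image, then compile skopeo inside it against the local tree.
docker build -f Dockerfile.build -t skopeo-buildenv .
docker run --rm -v "$PWD":/src/github.com/containers/skopeo skopeo-buildenv make
```
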
203
LICENSE
203
LICENSE
@@ -1,20 +1,189 @@
|
||||
Copyright (c) 2016 Antonio Murdaca <runcom@redhat.com>

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

233  Makefile
@@ -1,17 +1,226 @@
export GO15VENDOREXPERIMENT=1
.PHONY: all binary build-container docs docs-in-container build-local clean install install-binary install-completions shell test-integration .install.vndr vendor vendor-in-container

BINDIR=${DESTDIR}/usr/bin/
MANDIR=${DESTDIR}/usr/share/man
export GOPROXY=https://proxy.golang.org
PREFIX ?= /usr/local

all:
	go build -o skopeo .
	go-md2man -in man/skopeo.1.md -out skopeo.1
ifeq ($(shell uname),Darwin)
DARWIN_BUILD_TAG=
endif

install:
	install -d -m 0755 ${BINDIR}
	install -m 755 skopeo ${BINDIR}
	install -m 644 skopeo.1 ${MANDIR}/man1/
# On some plaforms (eg. macOS, FreeBSD) gpgme is installed in /usr/local/ but /usr/local/include/ is
# not in the default search path. Rather than hard-code this directory, use gpgme-config.
# Sadly that must be done at the top-level user instead of locally in the gpgme subpackage, because cgo
# supports only pkg-config, not general shell scripts, and gpgme does not install a pkg-config file.
# If gpgme is not installed or gpgme-config can’t be found for other reasons, the error is silently ignored
# (and the user will probably find out because the cgo compilation will fail).
GPGME_ENV := CGO_CFLAGS="$(shell gpgme-config --cflags 2>/dev/null)" CGO_LDFLAGS="$(shell gpgme-config --libs 2>/dev/null)"

CONTAINERSCONFDIR=/etc/containers
REGISTRIESDDIR=${CONTAINERSCONFDIR}/registries.d
SIGSTOREDIR=/var/lib/containers/sigstore
BINDIR=${PREFIX}/bin
MANDIR=${PREFIX}/share/man
BASHCOMPLETIONSDIR=${PREFIX}/share/bash-completion/completions

GO ?= go
GOBIN := $(shell $(GO) env GOBIN)
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)

ifeq ($(GOBIN),)
GOBIN := $(GOPATH)/bin
endif

# Required for integration-tests to detect they are running inside a specific
# container image. Env. var defined in image, make does not automatically
# pass to children unless explicitly exported
export container_magic
CONTAINER_RUNTIME := $(shell command -v podman 2> /dev/null || echo docker)
GOMD2MAN ?= $(shell command -v go-md2man || echo '$(GOBIN)/go-md2man')

# Go module support: set `-mod=vendor` to use the vendored sources.
# See also hack/make.sh.
ifeq ($(shell go help mod >/dev/null 2>&1 && echo true), true)
GO:=GO111MODULE=on $(GO)
MOD_VENDOR=-mod=vendor
endif

ifeq ($(DEBUG), 1)
override GOGCFLAGS += -N -l
endif

ifeq ($(GOOS), linux)
ifneq ($(GOARCH),$(filter $(GOARCH),mips mipsle mips64 mips64le ppc64 riscv64))
GO_DYN_FLAGS="-buildmode=pie"
endif
endif

GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
IMAGE := skopeo-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH))
# set env like gobuildtag?
CONTAINER_CMD := ${CONTAINER_RUNTIME} run --rm -i -e TESTFLAGS="$(TESTFLAGS)" #$(CONTAINER_ENVS)
# if this session isn't interactive, then we don't want to allocate a
# TTY, which would fail, but if it is interactive, we do want to attach
# so that the user can send e.g. ^C through.
INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0)
ifeq ($(INTERACTIVE), 1)
CONTAINER_CMD += -t
endif
CONTAINER_RUN := $(CONTAINER_CMD) "$(IMAGE)"

GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)

EXTRA_LDFLAGS ?=
SKOPEO_LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} $(EXTRA_LDFLAGS)'

MANPAGES_MD = $(wildcard docs/*.md)
MANPAGES ?= $(MANPAGES_MD:%.md=%)

BTRFS_BUILD_TAG = $(shell hack/btrfs_tag.sh) $(shell hack/btrfs_installed_tag.sh)
LIBDM_BUILD_TAG = $(shell hack/libdm_tag.sh)
LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG) $(DARWIN_BUILD_TAG)
BUILDTAGS += $(LOCAL_BUILD_TAGS)

ifeq ($(DISABLE_CGO), 1)
override BUILDTAGS = exclude_graphdriver_devicemapper exclude_graphdriver_btrfs containers_image_openpgp
endif

# make all DEBUG=1
# Note: Uses the -N -l go compiler options to disable compiler optimizations
#       and inlining. Using these build options allows you to subsequently
#       use source debugging tools like delve.
all: bin/skopeo docs

help:
	@echo "Usage: make <target>"
	@echo
	@echo "Defaults to building bin/skopeo and docs"
	@echo
	@echo " * 'install' - Install binaries and documents to system locations"
	@echo " * 'binary' - Build skopeo with a container"
	@echo " * 'static' - Build statically linked binary"
	@echo " * 'bin/skopeo' - Build skopeo locally"
	@echo " * 'test-unit' - Execute unit tests"
	@echo " * 'test-integration' - Execute integration tests"
	@echo " * 'validate' - Verify whether there is no conflict and all Go source files have been formatted, linted and vetted"
	@echo " * 'check' - Including above validate, test-integration and test-unit"
	@echo " * 'shell' - Run the built image and attach to a shell"
	@echo " * 'clean' - Clean artifacts"

# Build a container image (skopeobuild) that has everything we need to build.
# Then do the build and the output (skopeo) should appear in current dir
binary: cmd/skopeo
	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
	${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
		skopeobuildimage make bin/skopeo $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'

# Update nix/nixpkgs.json its latest stable commit
.PHONY: nixpkgs
nixpkgs:
	@nix run \
		-f channel:nixos-20.09 nix-prefetch-git \
		-c nix-prefetch-git \
		--no-deepClone \
		https://github.com/nixos/nixpkgs refs/heads/nixos-20.09 > nix/nixpkgs.json

# Build statically linked binary
.PHONY: static
static:
	@nix build -f nix/
	mkdir -p ./bin
	cp -rfp ./result/bin/* ./bin/

# Build w/o using containers
.PHONY: bin/skopeo
bin/skopeo:
	$(GPGME_ENV) $(GO) build $(MOD_VENDOR) ${GO_DYN_FLAGS} ${SKOPEO_LDFLAGS} -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o $@ ./cmd/skopeo
bin/skopeo.%:
	GOOS=$(word 2,$(subst ., ,$@)) GOARCH=$(word 3,$(subst ., ,$@)) $(GO) build $(MOD_VENDOR) ${SKOPEO_LDFLAGS} -tags "containers_image_openpgp $(BUILDTAGS)" -o $@ ./cmd/skopeo
local-cross: bin/skopeo.darwin.amd64 bin/skopeo.linux.arm bin/skopeo.linux.arm64 bin/skopeo.windows.386.exe bin/skopeo.windows.amd64.exe

build-container:
	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -t "$(IMAGE)" .

$(MANPAGES): %: %.md
	sed -e 's/\((skopeo.*\.md)\)//' -e 's/\[\(skopeo.*\)\]/\1/' $< | $(GOMD2MAN) -in /dev/stdin -out $@

docs: $(MANPAGES)

docs-in-container:
	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
	${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
		skopeobuildimage make docs $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'

clean:
	rm -f skopeo
	rm -f skopeo.1
	rm -rf bin docs/*.1

install: install-binary install-docs install-completions
	install -d -m 755 ${DESTDIR}/${SIGSTOREDIR}
	install -d -m 755 ${DESTDIR}/${CONTAINERSCONFIGDIR}
	install -m 644 default-policy.json ${DESTDIR}/${CONTAINERSCONFIGDIR}/policy.json
	install -d -m 755 ${DESTDIR}/${REGISTRIESDDIR}
	install -m 644 default.yaml ${DESTDIR}/${REGISTRIESDDIR}/default.yaml

install-binary: bin/skopeo
	install -d -m 755 ${DESTDIR}/${BINDIR}
	install -m 755 bin/skopeo ${DESTDIR}/${BINDIR}/skopeo

install-docs: docs
	install -d -m 755 ${DESTDIR}/${MANDIR}/man1
	install -m 644 docs/*.1 ${DESTDIR}/${MANDIR}/man1

install-completions:
	install -m 755 -d ${DESTDIR}/${BASHCOMPLETIONSDIR}
	install -m 644 completions/bash/skopeo ${DESTDIR}/${BASHCOMPLETIONSDIR}/skopeo

shell: build-container
	$(CONTAINER_RUN) bash

check: validate test-unit test-integration test-system

# The tests can run out of entropy and block in containers, so replace /dev/random.
test-integration: build-container
	$(CONTAINER_RUN) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 BUILDTAGS="$(BUILDTAGS)" $(MAKE) test-integration-local'

# Intended for CI, shortcut 'build-container' since already running inside container.
test-integration-local:
	hack/make.sh test-integration

# complicated set of options needed to run podman-in-podman
test-system: build-container
	DTEMP=$(shell mktemp -d --tmpdir=/var/tmp podman-tmp.XXXXXX); \
	$(CONTAINER_CMD) --privileged \
		-v $$DTEMP:/var/lib/containers:Z -v /run/systemd/journal/socket:/run/systemd/journal/socket \
		"$(IMAGE)" \
		bash -c 'BUILDTAGS="$(BUILDTAGS)" $(MAKE) test-system-local'; \
	rc=$$?; \
	$(RM) -rf $$DTEMP; \
	exit $$rc

# Intended for CI, shortcut 'build-container' since already running inside container.
test-system-local:
	hack/make.sh test-system

test-unit: build-container
	# Just call (make test unit-local) here instead of worrying about environment differences
	$(CONTAINER_RUN) make test-unit-local BUILDTAGS='$(BUILDTAGS)'

validate: build-container
	$(CONTAINER_RUN) hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet

# This target is only intended for development, e.g. executing it from an IDE. Use (make test) for CI or pre-release testing.
test-all-local: validate-local test-unit-local

validate-local:
	hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet

test-unit-local:
	$(GPGME_ENV) $(GO) test $(MOD_VENDOR) -tags "$(BUILDTAGS)" $$($(GO) list $(MOD_VENDOR) -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/containers/skopeo/\(integration\|vendor/.*\)$$')

vendor:
	$(GO) mod tidy
	$(GO) mod vendor
	$(GO) mod verify

vendor-in-container:
	podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.13 make vendor
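Editor's note: the `# make all DEBUG=1` comment in the Makefile above says the `-N -l` compiler options keep the binary usable with source debuggers such as delve. A minimal sketch of that workflow, assuming the Delve debugger (`dlv`) is installed and using a hypothetical image reference, not part of the diff:

```console
$ make bin/skopeo DEBUG=1
$ dlv exec ./bin/skopeo -- inspect docker://registry.fedoraproject.org/fedora:latest
```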
278  README.md
@@ -1,109 +1,215 @@
skopeo [](https://travis-ci.org/runcom/skopeo)
skopeo [](https://travis-ci.org/containers/skopeo)
=

_Please be aware `skopeo` is still work in progress_
<img src="https://cdn.rawgit.com/containers/skopeo/master/docs/skopeo.svg" width="250">

`skopeo` is a command line utility which is able to _inspect_ a repository on a Docker registry.
By _inspect_ I mean it fetches the repository's manifest and it is able to show you a `docker inspect`-like
json output about a whole repository or a tag. This tool, in constrast to `docker inspect`, helps you gather useful information about
a repository or a tag before pulling it (using disk space) - e.g. - which tags are available for the given repository? which labels the image has?
----

`skopeo` is a command line utility that performs various operations on container images and image repositories.

`skopeo` does not require the user to be running as root to do most of its operations.

`skopeo` does not require a daemon to be running to perform its operations.

`skopeo` can work with [OCI images](https://github.com/opencontainers/image-spec) as well as the original Docker v2 images.

Skopeo works with API V2 container image registries such as [docker.io](https://docker.io) and [quay.io](https://quay.io) registries, private registries, local directories and local OCI-layout directories. Skopeo can perform operations which consist of:

* Copying an image from and to various storage mechanisms.
  For example you can copy images from one registry to another, without requiring privilege.
* Inspecting a remote image showing its properties including its layers, without requiring you to pull the image to the host.
* Deleting an image from an image repository.
* Syncing an external image repository to an internal registry for air-gapped deployments.
* When required by the repository, skopeo can pass the appropriate credentials and certificates for authentication.

Skopeo operates on the following image and repository types:

* containers-storage:docker-reference
  An image located in a local containers/storage image store. Both the location and image store are specified in /etc/containers/storage.conf. (This is the backend for [Podman](https://podman.io), [CRI-O](https://cri-o.io), [Buildah](https://buildah.io) and friends)

* dir:path
  An existing local directory path storing the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.

* docker://docker-reference
  An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `skopeo login`.

* docker-archive:path[:docker-reference]
  An image is stored in a `docker save`-formatted file. docker-reference is only used when creating such a file, and it must not contain a digest.

* docker-daemon:docker-reference
  An image docker-reference stored in the docker daemon internal storage. docker-reference must contain either a tag or a digest. Alternatively, when reading images, the format can also be docker-daemon:algo:digest (an image ID).

* oci:path:tag
  An image tag in a directory compliant with "Open Container Image Layout Specification" at path.

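Editor's note: a brief, illustrative sketch of the transport syntax listed above, using hypothetical local paths (not taken from the diff itself):

```console
$ skopeo inspect docker://registry.fedoraproject.org/fedora:latest      # registry transport
$ skopeo copy docker://registry.fedoraproject.org/fedora:latest dir:/tmp/fedora   # unpack into a local directory
$ skopeo copy dir:/tmp/fedora oci:/tmp/fedora-oci:latest                 # re-pack as an OCI layout
```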
## Inspecting a repository
`skopeo` is able to _inspect_ a repository on a container registry and fetch images layers.
The _inspect_ command fetches the repository's manifest and it is able to show you a `docker inspect`-like
json output about a whole repository or a tag. This tool, in contrast to `docker inspect`, helps you gather useful information about
a repository or a tag before pulling it (using disk space). The inspect command can show you which tags are available for the given
repository, the labels the image has, the creation date and operating system of the image and more.

Examples:
```sh
# show repository's labels of rhel7:latest
$ skopeo registry.access.redhat.com/rhel7 | jq '.Config.Labels'

#### Show properties of fedora:latest
```console
$ skopeo inspect docker://registry.fedoraproject.org/fedora:latest
{
    "Architecture": "x86_64",
    "Authoritative_Registry": "registry.access.redhat.com",
    "BZComponent": "rhel-server-docker",
    "Build_Host": "rcm-img04.build.eng.bos.redhat.com",
    "Name": "rhel7/rhel",
    "Release": "38",
    "Vendor": "Red Hat, Inc.",
    "Version": "7.2"
    "Name": "registry.fedoraproject.org/fedora",
    "Digest": "sha256:655721ff613ee766a4126cb5e0d5ae81598e1b0c3bcf7017c36c4d72cb092fe9",
    "RepoTags": [
        "24",
        "25",
        "26-modular",
        ...
    ],
    "Created": "2020-04-29T06:48:16Z",
    "DockerVersion": "1.10.1",
    "Labels": {
        "license": "MIT",
        "name": "fedora",
        "vendor": "Fedora Project",
        "version": "32"
    },
    "Architecture": "amd64",
    "Os": "linux",
    "Layers": [
        "sha256:3088721d7dbf674fc0be64cd3cf00c25aab921cacf35fa0e7b1578500a3e1653"
    ],
    "Env": [
        "DISTTAG=f32container",
        "FGC=f32",
        "container=oci"
    ]
}

# show repository's tags
$ skopeo docker.io/fedora | jq '.RepoTags'
[
    "20",
    "21",
    "22",
    "23",
    "heisenbug",
    "latest",
    "rawhide"
]

# show image's digest
$ skopeo docker.io/fedora:rawhide | jq '.Digest'
"sha256:905b4846938c8aef94f52f3e41a11398ae5b40f5855fb0e40ed9c157e721d7f8"
```

Private registries with authentication
-
When interacting with private registries, `skopeo` first looks for the Docker's cli config file (usually located at `$HOME/.docker/config.json`) to get the credentials needed to authenticate. When the file isn't available it falls back looking for `--username` and `--password` flags. The ultimate fallback, as Docker does, is to provide an empty authentication when interacting with those registries.
#### Show container configuration from `fedora:latest`

Examples:
```sh
# on my system
$ skopeo --help | grep docker-cfg
--docker-cfg "/home/runcom/.docker" Docker's cli config for auth

$ cat /home/runcom/.docker/config.json
```console
$ skopeo inspect --config docker://registry.fedoraproject.org/fedora:latest | jq
{
    "auths": {
        "myregistrydomain.com:5000": {
            "auth": "dGVzdHVzZXI6dGVzdHBhc3N3b3Jk",
            "email": "stuf@ex.cm"
        }
    }
    "created": "2020-04-29T06:48:16Z",
    "architecture": "amd64",
    "os": "linux",
    "config": {
        "Env": [
            "DISTTAG=f32container",
            "FGC=f32",
            "container=oci"
        ],
        "Cmd": [
            "/bin/bash"
        ],
        "Labels": {
            "license": "MIT",
            "name": "fedora",
            "vendor": "Fedora Project",
            "version": "32"
        }
    },
    "rootfs": {
        "type": "layers",
        "diff_ids": [
            "sha256:a4c0fa2b217d3fd63d51e55a6fd59432e543d499c0df2b1acd48fbe424f2ddd1"
        ]
    },
    "history": [
        {
            "created": "2020-04-29T06:48:16Z",
            "comment": "Created by Image Factory"
        }
    ]
}
```
#### Show unverified image's digest
```console
$ skopeo inspect docker://registry.fedoraproject.org/fedora:latest | jq '.Digest'
"sha256:655721ff613ee766a4126cb5e0d5ae81598e1b0c3bcf7017c36c4d72cb092fe9"
```

# we can see I'm already authenticated via docker login so everything will be fine
$ skopeo myregistrydomain.com:5000/busybox
## Copying images

`skopeo` can copy container images between various storage mechanisms, including:
* Container registries

  - The Quay, Docker Hub, OpenShift, GCR, Artifactory ...

* Container Storage backends

  - [github.com/containers/storage](https://github.com/containers/storage) (Backend for [Podman](https://podman.io), [CRI-O](https://cri-o.io), [Buildah](https://buildah.io) and friends)

  - Docker daemon storage

* Local directories

* Local OCI-layout directories

```console
$ skopeo copy docker://quay.io/buildah/stable docker://registry.internal.company.com/buildah
$ skopeo copy oci:busybox_ocilayout:latest dir:existingemptydirectory
```

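Editor's note: `skopeo copy` also accepts `--all` (see cmd/skopeo/copy.go later in this diff), which copies every image in a manifest list rather than only the one matching the current platform. A small sketch reusing the registry names from the example above:

```console
$ skopeo copy --all docker://quay.io/buildah/stable docker://registry.internal.company.com/buildah
```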
## Deleting images
```console
$ skopeo delete docker://localhost:5000/imagename:latest
```

## Syncing registries
```console
$ skopeo sync --src docker --dest dir registry.example.com/busybox /media/usb
```

## Authenticating to a registry

#### Private registries with authentication
skopeo uses credentials from the --creds (for skopeo inspect|delete) or --src-creds|--dest-creds (for skopeo copy) flags, if set; otherwise it uses configuration set by skopeo login, podman login, buildah login, or docker login.

```console
$ skopeo login --username USER myregistrydomain.com:5000
Password:
$ skopeo inspect docker://myregistrydomain.com:5000/busybox
{"Tag":"latest","Digest":"sha256:473bb2189d7b913ed7187a33d11e743fdc2f88931122a44d91a301b64419f092","RepoTags":["latest"],"Comment":"","Created":"2016-01-15T18:06:41.282540103Z","ContainerConfig":{"Hostname":"aded96b43f48","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"9e77fef7a1c9f989988c06620dabc4020c607885b959a2cbd7c2283c91da3e33","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"DockerVersion":"1.8.3","Author":"","Config":{"Hostname":"aded96b43f48","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"9e77fef7a1c9f989988c06620dabc4020c607885b959a2cbd7c2283c91da3e33","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"Architecture":"amd64","Os":"linux"}
$ skopeo logout myregistrydomain.com:5000
```

# let's try now to fake a non existent Docker's config file
$ skopeo --docker-cfg="" myregistrydomain.com:5000/busybox
INFO[0000] Docker cli config file not found: stat : no such file or directory, falling back to --username and --password
FATA[0000] unable to ping registry endpoint http://myregistrydomain.com:5000/v0/
v2 ping attempt failed with error: Get http://myregistrydomain.com:5000/v2/: malformed HTTP response "\x15\x03\x01\x00\x02\x02"
v1 ping attempt failed with error: Get http://myregistrydomain.com:5000/v1/_ping: malformed HTTP response "\x15\x03\x01\x00\x02\x02"
#### Using --creds directly

# we can see the cli config isn't found so it looks for --username and --password
# but because I didn't provide them I can't auth to the registry and I receive the above error
INFO[0000] Docker cli config file not found: stat : no such file or directory, falling back to --username and --password

# passing --username and --password - we can see that everything goes fine despite an info showing
# cli config can't be found
$ skopeo --docker-cfg="" --username=testuser --password=testpassword myregistrydomain.com:5000/busybox
INFO[0000] Docker cli config file not found: stat : no such file or directory, falling back to --username and --password
```console
$ skopeo inspect --creds=testuser:testpassword docker://myregistrydomain.com:5000/busybox
{"Tag":"latest","Digest":"sha256:473bb2189d7b913ed7187a33d11e743fdc2f88931122a44d91a301b64419f092","RepoTags":["latest"],"Comment":"","Created":"2016-01-15T18:06:41.282540103Z","ContainerConfig":{"Hostname":"aded96b43f48","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"9e77fef7a1c9f989988c06620dabc4020c607885b959a2cbd7c2283c91da3e33","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"DockerVersion":"1.8.3","Author":"","Config":{"Hostname":"aded96b43f48","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"9e77fef7a1c9f989988c06620dabc4020c607885b959a2cbd7c2283c91da3e33","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"Architecture":"amd64","Os":"linux"}
```
If your cli config is found but it doesn't contain the necessary credentials for the queried registry
you'll get an error. You can fix this by either logging in (via `docker login`) or providing `--username`
and `--password`.
Building
-
```sh
$ git clone https://github.com/runcom/skopeo
$ make
```
Installing
-
```sh
$ sudo make install
```
TODO
-
- show repo tags via flag or when reference isn't tagged or digested
- add tests (integration with deployed registries in container - Docker-like)

NOT TODO
```console
$ skopeo copy --src-creds=testuser:testpassword docker://myregistrydomain.com:5000/private oci:local_oci_image
```

[Obtaining skopeo](./install.md)
-
- provide a _format_ flag - just use the awesome [jq](https://stedolan.github.io/jq/)

For a detailed description how to install or build skopeo, see
[install.md](./install.md).

Contributing
-

Please read the [contribution guide](CONTRIBUTING.md) if you want to collaborate in the project.

## Commands
| Command | Description |
| -------------------------------------------------- | ---------------------------------------------------------------------------------------------|
| [skopeo-copy(1)](/docs/skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
| [skopeo-delete(1)](/docs/skopeo-delete.1.md) | Mark the image-name for later deletion by the registry's garbage collector. |
| [skopeo-inspect(1)](/docs/skopeo-inspect.1.md) | Return low-level information about image-name in a registry. |
| [skopeo-list-tags(1)](/docs/skopeo-list-tags.1.md) | Return a list of tags for the transport-specific image repository. |
| [skopeo-login(1)](/docs/skopeo-login.1.md) | Login to a container registry. |
| [skopeo-logout(1)](/docs/skopeo-logout.1.md) | Logout of a container registry. |
| [skopeo-manifest-digest(1)](/docs/skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
| [skopeo-standalone-sign(1)](/docs/skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
| [skopeo-standalone-verify(1)](/docs/skopeo-standalone-verify.1.md)| Verify an image signature. |
| [skopeo-sync(1)](/docs/skopeo-sync.1.md) | Synchronize images between container registries and local directories. |

License
-
MIT
skopeo is licensed under the Apache License, Version 2.0. See
[LICENSE](LICENSE) for the full license text.

3  SECURITY.md  Normal file
@@ -0,0 +1,3 @@
## Security and Disclosure Information Policy for the skopeo Project

The skopeo Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects.

34  cmd/skopeo/cgo_pthread_ordering_workaround.go  Normal file
@@ -0,0 +1,34 @@
// +build !containers_image_openpgp

package main

/*
This is a pretty horrible workaround. Due to a glibc bug
https://bugzilla.redhat.com/show_bug.cgi?id=1326903 , we must ensure we link
with -lgpgme before -lpthread. Such arguments come from various packages
using cgo, and the ordering of these arguments is, with current (go tool link),
dependent on the order in which the cgo-using packages are found in a
breadth-first search following dependencies, starting from “main”.

Thus, if
	import "net"
is processed before
	import "…/skopeo/signature"
it will, in the next level of the BFS, pull in "runtime/cgo" (a dependency of
"net") before "mtrmac/gpgme" (a dependency of "…/skopeo/signature"), causing
-lpthread (used by "runtime/cgo") to be used before -lgpgme.

This might be possible to work around by careful import ordering, or by removing
a direct dependency on "net", but that would be very fragile.

So, until the above bug is fixed, add -lgpgme directly in the "main" package
to ensure the needed build order.

Unfortunately, this workaround needs to be applied at the top level of any user
of "…/skopeo/signature"; it cannot be added to "…/skopeo/signature" itself,
by that time this package is first processed by the linker, a -lpthread may
already be queued and it would be too late.
*/

// #cgo LDFLAGS: -lgpgme
import "C"

201  cmd/skopeo/copy.go  Normal file
@@ -0,0 +1,201 @@
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"

	"github.com/containers/common/pkg/retry"
	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/transports"
	"github.com/containers/image/v5/transports/alltransports"
	"github.com/spf13/cobra"

	encconfig "github.com/containers/ocicrypt/config"
	enchelpers "github.com/containers/ocicrypt/helpers"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

type copyOptions struct {
	global            *globalOptions
	srcImage          *imageOptions
	destImage         *imageDestOptions
	retryOpts         *retry.RetryOptions
	additionalTags    []string       // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
	removeSignatures  bool           // Do not copy signatures from the source image
	signByFingerprint string         // Sign the image using a GPG key with the specified fingerprint
	format            optionalString // Force conversion of the image to a specified format
	quiet             bool           // Suppress output information when copying images
	all               bool           // Copy all of the images if the source is a list
	encryptLayer      []int          // The list of layers to encrypt
	encryptionKeys    []string       // Keys needed to encrypt the image
	decryptionKeys    []string       // Keys needed to decrypt the image
}

func copyCmd(global *globalOptions) *cobra.Command {
	sharedFlags, sharedOpts := sharedImageFlags()
	srcFlags, srcOpts := imageFlags(global, sharedOpts, "src-", "screds")
	destFlags, destOpts := imageDestFlags(global, sharedOpts, "dest-", "dcreds")
	retryFlags, retryOpts := retryFlags()
	opts := copyOptions{global: global,
		srcImage:  srcOpts,
		destImage: destOpts,
		retryOpts: retryOpts,
	}
	cmd := &cobra.Command{
		Use:   "copy [command options] SOURCE-IMAGE DESTINATION-IMAGE",
		Short: "Copy an IMAGE-NAME from one location to another",
		Long: fmt.Sprintf(`Container "IMAGE-NAME" uses a "transport":"details" format.

Supported transports:
%s

See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
		RunE:    commandAction(opts.run),
		Example: `skopeo copy docker://quay.io/skopeo/stable:latest docker://registry.example.com/skopeo:latest`,
	}
	adjustUsage(cmd)
	flags := cmd.Flags()
	flags.AddFlagSet(&sharedFlags)
	flags.AddFlagSet(&srcFlags)
	flags.AddFlagSet(&destFlags)
	flags.AddFlagSet(&retryFlags)
	flags.StringSliceVar(&opts.additionalTags, "additional-tag", []string{}, "additional tags (supports docker-archive)")
	flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress output information when copying images")
	flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
	flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE-IMAGE")
	flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
	flags.VarP(newOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)`)
	flags.StringSliceVar(&opts.encryptionKeys, "encryption-key", []string{}, "*Experimental* key with the encryption protocol to use needed to encrypt the image (e.g. jwe:/path/to/key.pem)")
	flags.IntSliceVar(&opts.encryptLayer, "encrypt-layer", []int{}, "*Experimental* the 0-indexed layer indices, with support for negative indexing (e.g. 0 is the first layer, -1 is the last layer)")
	flags.StringSliceVar(&opts.decryptionKeys, "decryption-key", []string{}, "*Experimental* key needed to decrypt the image")
	return cmd
}

func (opts *copyOptions) run(args []string, stdout io.Writer) error {
	if len(args) != 2 {
		return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
	}
	imageNames := args

	if err := reexecIfNecessaryForImages(imageNames...); err != nil {
		return err
	}

	policyContext, err := opts.global.getPolicyContext()
	if err != nil {
		return fmt.Errorf("Error loading trust policy: %v", err)
	}
	defer policyContext.Destroy()

	srcRef, err := alltransports.ParseImageName(imageNames[0])
	if err != nil {
		return fmt.Errorf("Invalid source name %s: %v", imageNames[0], err)
	}
	destRef, err := alltransports.ParseImageName(imageNames[1])
	if err != nil {
		return fmt.Errorf("Invalid destination name %s: %v", imageNames[1], err)
	}

	sourceCtx, err := opts.srcImage.newSystemContext()
	if err != nil {
		return err
	}
	destinationCtx, err := opts.destImage.newSystemContext()
	if err != nil {
		return err
	}

	var manifestType string
	if opts.format.present {
		switch opts.format.value {
		case "oci":
			manifestType = imgspecv1.MediaTypeImageManifest
		case "v2s1":
			manifestType = manifest.DockerV2Schema1SignedMediaType
		case "v2s2":
			manifestType = manifest.DockerV2Schema2MediaType
		default:
			return fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci', 'v2s1', or 'v2s2'", opts.format.value)
		}
	}

	for _, image := range opts.additionalTags {
		ref, err := reference.ParseNormalizedNamed(image)
		if err != nil {
			return fmt.Errorf("error parsing additional-tag '%s': %v", image, err)
		}
		namedTagged, isNamedTagged := ref.(reference.NamedTagged)
		if !isNamedTagged {
			return fmt.Errorf("additional-tag '%s' must be a tagged reference", image)
		}
		destinationCtx.DockerArchiveAdditionalTags = append(destinationCtx.DockerArchiveAdditionalTags, namedTagged)
	}

	ctx, cancel := opts.global.commandTimeoutContext()
	defer cancel()

	if opts.quiet {
		stdout = nil
	}
	imageListSelection := copy.CopySystemImage
	if opts.all {
		imageListSelection = copy.CopyAllImages
	}

	if len(opts.encryptionKeys) > 0 && len(opts.decryptionKeys) > 0 {
		return fmt.Errorf("--encryption-key and --decryption-key cannot be specified together")
	}

	var encLayers *[]int
	var encConfig *encconfig.EncryptConfig
	var decConfig *encconfig.DecryptConfig

	if len(opts.encryptLayer) > 0 && len(opts.encryptionKeys) == 0 {
		return fmt.Errorf("--encrypt-layer can only be used with --encryption-key")
	}

	if len(opts.encryptionKeys) > 0 {
		// encryption
		p := opts.encryptLayer
		encLayers = &p
		encryptionKeys := opts.encryptionKeys
		ecc, err := enchelpers.CreateCryptoConfig(encryptionKeys, []string{})
		if err != nil {
			return fmt.Errorf("Invalid encryption keys: %v", err)
		}
		cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{ecc})
		encConfig = cc.EncryptConfig
	}

	if len(opts.decryptionKeys) > 0 {
		// decryption
		decryptionKeys := opts.decryptionKeys
		dcc, err := enchelpers.CreateCryptoConfig([]string{}, decryptionKeys)
		if err != nil {
			return fmt.Errorf("Invalid decryption keys: %v", err)
		}
		cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{dcc})
		decConfig = cc.DecryptConfig
	}

	return retry.RetryIfNecessary(ctx, func() error {
		_, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
			RemoveSignatures:      opts.removeSignatures,
			SignBy:                opts.signByFingerprint,
			ReportWriter:          stdout,
			SourceCtx:             sourceCtx,
			DestinationCtx:        destinationCtx,
			ForceManifestMIMEType: manifestType,
			ImageListSelection:    imageListSelection,
			OciDecryptConfig:      decConfig,
			OciEncryptLayers:      encLayers,
			OciEncryptConfig:      encConfig,
		})
		return err
	}, opts.retryOpts)
}
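Editor's note: the `--encryption-key`, `--encrypt-layer` and `--decryption-key` flags registered above are marked experimental in their help text. A minimal sketch of how they might be combined on the command line, with hypothetical key paths and image names, following the `jwe:/path/to/key.pem` form from the flag description:

```console
$ skopeo copy --encryption-key jwe:/path/to/public.pem --encrypt-layer -1 \
    docker://docker.io/library/alpine:latest oci:local_encrypted:latest
$ skopeo copy --decryption-key /path/to/private.pem \
    oci:local_encrypted:latest oci:local_decrypted:latest
```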
75  cmd/skopeo/delete.go  Normal file
@@ -0,0 +1,75 @@
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"

	"github.com/containers/common/pkg/retry"
	"github.com/containers/image/v5/transports"
	"github.com/containers/image/v5/transports/alltransports"
	"github.com/spf13/cobra"
)

type deleteOptions struct {
	global    *globalOptions
	image     *imageOptions
	retryOpts *retry.RetryOptions
}

func deleteCmd(global *globalOptions) *cobra.Command {
	sharedFlags, sharedOpts := sharedImageFlags()
	imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
	retryFlags, retryOpts := retryFlags()
	opts := deleteOptions{
		global:    global,
		image:     imageOpts,
		retryOpts: retryOpts,
	}
	cmd := &cobra.Command{
		Use:   "delete [command options] IMAGE-NAME",
		Short: "Delete image IMAGE-NAME",
		Long: fmt.Sprintf(`Delete an "IMAGE_NAME" from a transport
Supported transports:
%s
See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
		RunE:    commandAction(opts.run),
		Example: `skopeo delete docker://registry.example.com/example/pause:latest`,
	}
	adjustUsage(cmd)
	flags := cmd.Flags()
	flags.AddFlagSet(&sharedFlags)
	flags.AddFlagSet(&imageFlags)
	flags.AddFlagSet(&retryFlags)
	return cmd
}

func (opts *deleteOptions) run(args []string, stdout io.Writer) error {
	if len(args) != 1 {
		return errors.New("Usage: delete imageReference")
	}
	imageName := args[0]

	if err := reexecIfNecessaryForImages(imageName); err != nil {
		return err
	}

	ref, err := alltransports.ParseImageName(imageName)
	if err != nil {
		return fmt.Errorf("Invalid source name %s: %v", imageName, err)
	}

	sys, err := opts.image.newSystemContext()
	if err != nil {
		return err
	}

	ctx, cancel := opts.global.commandTimeoutContext()
	defer cancel()

	return retry.RetryIfNecessary(ctx, func() error {
		return ref.DeleteImage(ctx, sys)
	}, opts.retryOpts)
}

4  cmd/skopeo/fixtures/.gitignore  vendored  Normal file
@@ -0,0 +1,4 @@
/*.gpg~
/.gpg-v21-migrated
/private-keys-v1.d
/random_seed

BIN  cmd/skopeo/fixtures/corrupt.signature  Normal file
Binary file not shown.

26  cmd/skopeo/fixtures/image.manifest.json  Normal file
@@ -0,0 +1,26 @@
{
    "schemaVersion": 2,
    "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
    "config": {
        "mediaType": "application/vnd.docker.container.image.v1+json",
        "size": 7023,
        "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
    },
    "layers": [
        {
            "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
            "size": 32654,
            "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
        },
        {
            "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
            "size": 16724,
            "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
        },
        {
            "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
            "size": 73109,
            "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
        }
    ]
}

BIN  cmd/skopeo/fixtures/image.signature  Normal file
Binary file not shown.
BIN  cmd/skopeo/fixtures/pubring.gpg  Normal file
Binary file not shown.
BIN  cmd/skopeo/fixtures/secring.gpg  Normal file
Binary file not shown.
BIN  cmd/skopeo/fixtures/trustdb.gpg  Normal file
Binary file not shown.

11  cmd/skopeo/fixtures/v2s1-invalid-signatures.manifest.json  Normal file
@@ -0,0 +1,11 @@
{
    "schemaVersion": 1,
    "name": "mitr/buxybox",
    "tag": "latest",
    "architecture": "amd64",
    "fsLayers": [
    ],
    "history": [
    ],
    "signatures": 1
}

129  cmd/skopeo/flag.go  Normal file
@@ -0,0 +1,129 @@
package main

import (
	"strconv"

	"github.com/spf13/pflag"
)

// optionalBool is a boolean with a separate presence flag.
type optionalBool struct {
	present bool
	value   bool
}

// optionalBool is a cli.Generic == flag.Value implementation equivalent to
// the one underlying flag.Bool, except that it records whether the flag has been set.
// This is distinct from optionalBool to (pretend to) force callers to use
// optionalBoolFlag
type optionalBoolValue optionalBool

func optionalBoolFlag(fs *pflag.FlagSet, p *optionalBool, name, usage string) *pflag.Flag {
	flag := fs.VarPF(internalNewOptionalBoolValue(p), name, "", usage)
	flag.NoOptDefVal = "true"
	return flag
}

// WARNING: Do not directly use this method to define optionalBool flag.
// Caller should use optionalBoolFlag
func internalNewOptionalBoolValue(p *optionalBool) pflag.Value {
	p.present = false
	return (*optionalBoolValue)(p)
}

func (ob *optionalBoolValue) Set(s string) error {
	v, err := strconv.ParseBool(s)
	if err != nil {
		return err
	}
	ob.value = v
	ob.present = true
	return nil
}

func (ob *optionalBoolValue) String() string {
	if !ob.present {
		return "" // This is, sadly, not round-trip safe: --flag is interpreted as --flag=true
	}
	return strconv.FormatBool(ob.value)
}

func (ob *optionalBoolValue) Type() string {
	return "bool"
}

func (ob *optionalBoolValue) IsBoolFlag() bool {
	return true
}

// optionalString is a string with a separate presence flag.
type optionalString struct {
	present bool
	value   string
}

// optionalString is a cli.Generic == flag.Value implementation equivalent to
// the one underlying flag.String, except that it records whether the flag has been set.
// This is distinct from optionalString to (pretend to) force callers to use
// newoptionalString
type optionalStringValue optionalString

func newOptionalStringValue(p *optionalString) pflag.Value {
	p.present = false
	return (*optionalStringValue)(p)
}

func (ob *optionalStringValue) Set(s string) error {
	ob.value = s
	ob.present = true
	return nil
}

func (ob *optionalStringValue) String() string {
	if !ob.present {
		return "" // This is, sadly, not round-trip safe: --flag= is interpreted as {present:true, value:""}
	}
	return ob.value
}

func (ob *optionalStringValue) Type() string {
	return "string"
}

// optionalInt is a int with a separate presence flag.
type optionalInt struct {
	present bool
	value   int
}

// optionalInt is a cli.Generic == flag.Value implementation equivalent to
// the one underlying flag.Int, except that it records whether the flag has been set.
// This is distinct from optionalInt to (pretend to) force callers to use
// newoptionalIntValue
type optionalIntValue optionalInt

func newOptionalIntValue(p *optionalInt) pflag.Value {
	p.present = false
	return (*optionalIntValue)(p)
}

func (ob *optionalIntValue) Set(s string) error {
	v, err := strconv.ParseInt(s, 0, strconv.IntSize)
	if err != nil {
		return err
	}
	ob.value = int(v)
	ob.present = true
	return nil
}

func (ob *optionalIntValue) String() string {
	if !ob.present {
		return "" // If the value is not present, just return an empty string, any other value wouldn't make sense.
	}
	return strconv.Itoa(int(ob.value))
}

func (ob *optionalIntValue) Type() string {
	return "int"
}

222  cmd/skopeo/flag_test.go  Normal file
@@ -0,0 +1,222 @@
package main

import (
	"testing"

	"github.com/spf13/cobra"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestOptionalBoolSet(t *testing.T) {
	for _, c := range []struct {
		input    string
		accepted bool
		value    bool
	}{
		// Valid inputs documented for strconv.ParseBool == flag.BoolVar
		{"1", true, true},
		{"t", true, true},
		{"T", true, true},
		{"TRUE", true, true},
		{"true", true, true},
		{"True", true, true},
		{"0", true, false},
		{"f", true, false},
		{"F", true, false},
		{"FALSE", true, false},
		{"false", true, false},
		{"False", true, false},
		// A few invalid inputs
		{"", false, false},
		{"yes", false, false},
		{"no", false, false},
		{"2", false, false},
	} {
		var ob optionalBool
		v := internalNewOptionalBoolValue(&ob)
		require.False(t, ob.present)
		err := v.Set(c.input)
		if c.accepted {
			assert.NoError(t, err, c.input)
			assert.Equal(t, c.value, ob.value)
		} else {
			assert.Error(t, err, c.input)
			assert.False(t, ob.present) // Just to be extra paranoid.
		}
	}

	// Nothing actually explicitly says that .Set() is never called when the flag is not present on the command line;
	// so, check that it is not being called, at least in the straightforward case (it's not possible to test that it
	// is not called in any possible situation).
	var globalOB, commandOB optionalBool
	actionRun := false
	app := &cobra.Command{
		Use: "app",
	}
	optionalBoolFlag(app.PersistentFlags(), &globalOB, "global-OB", "")
	cmd := &cobra.Command{
		Use: "cmd",
		RunE: func(cmd *cobra.Command, args []string) error {
			assert.False(t, globalOB.present)
			assert.False(t, commandOB.present)
			actionRun = true
			return nil
		},
	}
	optionalBoolFlag(cmd.Flags(), &commandOB, "command-OB", "")
	app.AddCommand(cmd)
	app.SetArgs([]string{"cmd"})
	err := app.Execute()
	require.NoError(t, err)
	assert.True(t, actionRun)
}

func TestOptionalBoolString(t *testing.T) {
	for _, c := range []struct {
		input    optionalBool
		expected string
	}{
		{optionalBool{present: true, value: true}, "true"},
		{optionalBool{present: true, value: false}, "false"},
		{optionalBool{present: false, value: true}, ""},
		{optionalBool{present: false, value: false}, ""},
	} {
		var ob optionalBool
		v := internalNewOptionalBoolValue(&ob)
		ob = c.input
		res := v.String()
		assert.Equal(t, c.expected, res)
	}
}

func TestOptionalBoolIsBoolFlag(t *testing.T) {
	// IsBoolFlag means that the argument value must either be part of the same argument, with =;
	// if there is no =, the value is set to true.
	// This differs form other flags, where the argument is required and may be either separated with = or supplied in the next argument.
	for _, c := range []struct {
		input        []string
		expectedOB   optionalBool
		expectedArgs []string
	}{
		{[]string{"1", "2"}, optionalBool{present: false}, []string{"1", "2"}},                                   // Flag not present
		{[]string{"--OB=true", "1", "2"}, optionalBool{present: true, value: true}, []string{"1", "2"}},          // --OB=true
		{[]string{"--OB=false", "1", "2"}, optionalBool{present: true, value: false}, []string{"1", "2"}},        // --OB=false
		{[]string{"--OB", "true", "1", "2"}, optionalBool{present: true, value: true}, []string{"true", "1", "2"}},   // --OB true
		{[]string{"--OB", "false", "1", "2"}, optionalBool{present: true, value: true}, []string{"false", "1", "2"}}, // --OB false
	} {
		var ob optionalBool
		actionRun := false
		app := &cobra.Command{Use: "app"}
		cmd := &cobra.Command{
			Use: "cmd",
			RunE: func(cmd *cobra.Command, args []string) error {
				assert.Equal(t, c.expectedOB, ob)
				assert.Equal(t, c.expectedArgs, args)
				actionRun = true
				return nil
			},
		}
		optionalBoolFlag(cmd.Flags(), &ob, "OB", "")
		app.AddCommand(cmd)

		app.SetArgs(append([]string{"cmd"}, c.input...))
		err := app.Execute()
		require.NoError(t, err)
		assert.True(t, actionRun)
	}
}

func TestOptionalStringSet(t *testing.T) {
|
||||
// Really just a smoke test, but differentiating between not present and empty.
|
||||
for _, c := range []string{"", "hello"} {
|
||||
var os optionalString
|
||||
v := newOptionalStringValue(&os)
|
||||
require.False(t, os.present)
|
||||
err := v.Set(c)
|
||||
assert.NoError(t, err, c)
|
||||
assert.Equal(t, c, os.value)
|
||||
}
|
||||
|
||||
// Nothing actually explicitly says that .Set() is never called when the flag is not present on the command line;
|
||||
// so, check that it is not being called, at least in the straightforward case (it's not possible to test that it
|
||||
// is not called in any possible situation).
|
||||
var globalOS, commandOS optionalString
|
||||
actionRun := false
|
||||
app := &cobra.Command{
|
||||
Use: "app",
|
||||
}
|
||||
app.PersistentFlags().Var(newOptionalStringValue(&globalOS), "global-OS", "")
|
||||
cmd := &cobra.Command{
|
||||
Use: "cmd",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
assert.False(t, globalOS.present)
|
||||
assert.False(t, commandOS.present)
|
||||
actionRun = true
|
||||
return nil
|
||||
},
|
||||
}
|
||||
cmd.Flags().Var(newOptionalStringValue(&commandOS), "command-OS", "")
|
||||
app.AddCommand(cmd)
|
||||
app.SetArgs([]string{"cmd"})
|
||||
err := app.Execute()
|
||||
require.NoError(t, err)
|
||||
assert.True(t, actionRun)
|
||||
}
|
||||
|
||||
func TestOptionalStringString(t *testing.T) {
|
||||
for _, c := range []struct {
|
||||
input optionalString
|
||||
expected string
|
||||
}{
|
||||
{optionalString{present: true, value: "hello"}, "hello"},
|
||||
{optionalString{present: true, value: ""}, ""},
|
||||
{optionalString{present: false, value: "hello"}, ""},
|
||||
{optionalString{present: false, value: ""}, ""},
|
||||
} {
|
||||
var os optionalString
|
||||
v := newOptionalStringValue(&os)
|
||||
os = c.input
|
||||
res := v.String()
|
||||
assert.Equal(t, c.expected, res)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOptionalStringIsBoolFlag(t *testing.T) {
|
||||
// NOTE: optionalStringValue does not implement IsBoolFlag!
|
||||
// IsBoolFlag means that the argument value must either be part of the same argument, with =;
|
||||
// if there is no =, the value is set to true.
|
||||
// This differs form other flags, where the argument is required and may be either separated with = or supplied in the next argument.
|
||||
for _, c := range []struct {
|
||||
input []string
|
||||
expectedOS optionalString
|
||||
expectedArgs []string
|
||||
}{
|
||||
{[]string{"1", "2"}, optionalString{present: false}, []string{"1", "2"}}, // Flag not present
|
||||
{[]string{"--OS=hello", "1", "2"}, optionalString{present: true, value: "hello"}, []string{"1", "2"}}, // --OS=true
|
||||
{[]string{"--OS=", "1", "2"}, optionalString{present: true, value: ""}, []string{"1", "2"}}, // --OS=false
|
||||
{[]string{"--OS", "hello", "1", "2"}, optionalString{present: true, value: "hello"}, []string{"1", "2"}}, // --OS true
|
||||
{[]string{"--OS", "", "1", "2"}, optionalString{present: true, value: ""}, []string{"1", "2"}}, // --OS false
|
||||
} {
|
||||
var os optionalString
|
||||
actionRun := false
|
||||
app := &cobra.Command{
|
||||
Use: "app",
|
||||
}
|
||||
cmd := &cobra.Command{
|
||||
Use: "cmd",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
assert.Equal(t, c.expectedOS, os)
|
||||
assert.Equal(t, c.expectedArgs, args)
|
||||
actionRun = true
|
||||
return nil
|
||||
},
|
||||
}
|
||||
cmd.Flags().Var(newOptionalStringValue(&os), "OS", "")
|
||||
app.AddCommand(cmd)
|
||||
app.SetArgs(append([]string{"cmd"}, c.input...))
|
||||
err := app.Execute()
|
||||
require.NoError(t, err)
|
||||
assert.True(t, actionRun)
|
||||
}
|
||||
}
|
||||
cmd/skopeo/inspect.go (new file, 239 lines)
@@ -0,0 +1,239 @@
package main

import (
    "encoding/json"
    "fmt"
    "io"
    "os"
    "strings"
    "text/tabwriter"
    "text/template"

    "github.com/containers/common/pkg/report"
    "github.com/containers/common/pkg/retry"
    "github.com/containers/image/v5/docker"
    "github.com/containers/image/v5/image"
    "github.com/containers/image/v5/manifest"
    "github.com/containers/image/v5/transports"
    "github.com/containers/image/v5/types"
    "github.com/containers/skopeo/cmd/skopeo/inspect"
    v1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
)

type inspectOptions struct {
    global    *globalOptions
    image     *imageOptions
    retryOpts *retry.RetryOptions
    format    string
    raw       bool // Output the raw manifest instead of parsing information about the image
    config    bool // Output the raw config blob instead of parsing information about the image
}

func inspectCmd(global *globalOptions) *cobra.Command {
    sharedFlags, sharedOpts := sharedImageFlags()
    imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
    retryFlags, retryOpts := retryFlags()
    opts := inspectOptions{
        global:    global,
        image:     imageOpts,
        retryOpts: retryOpts,
    }
    cmd := &cobra.Command{
        Use:   "inspect [command options] IMAGE-NAME",
        Short: "Inspect image IMAGE-NAME",
        Long: fmt.Sprintf(`Return low-level information about "IMAGE-NAME" in a registry/transport
Supported transports:
%s

See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
        RunE: commandAction(opts.run),
        Example: `skopeo inspect docker://registry.fedoraproject.org/fedora
  skopeo inspect --config docker://docker.io/alpine
  skopeo inspect --format "Name: {{.Name}} Digest: {{.Digest}}" docker://registry.access.redhat.com/ubi8`,
    }
    adjustUsage(cmd)
    flags := cmd.Flags()
    flags.BoolVar(&opts.raw, "raw", false, "output raw manifest or configuration")
    flags.BoolVar(&opts.config, "config", false, "output configuration")
    flags.StringVarP(&opts.format, "format", "f", "", "Format the output to a Go template")
    flags.AddFlagSet(&sharedFlags)
    flags.AddFlagSet(&imageFlags)
    flags.AddFlagSet(&retryFlags)
    return cmd
}

func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error) {
    var (
        rawManifest []byte
        src         types.ImageSource
        imgInspect  *types.ImageInspectInfo
        data        []interface{}
    )
    ctx, cancel := opts.global.commandTimeoutContext()
    defer cancel()

    if len(args) != 1 {
        return errors.New("Exactly one argument expected")
    }
    if opts.raw && opts.format != "" {
        return errors.New("raw output does not support format option")
    }
    imageName := args[0]

    if err := reexecIfNecessaryForImages(imageName); err != nil {
        return err
    }

    sys, err := opts.image.newSystemContext()
    if err != nil {
        return err
    }

    if err := retry.RetryIfNecessary(ctx, func() error {
        src, err = parseImageSource(ctx, opts.image, imageName)
        return err
    }, opts.retryOpts); err != nil {
        return errors.Wrapf(err, "Error parsing image name %q", imageName)
    }

    defer func() {
        if err := src.Close(); err != nil {
            retErr = errors.Wrapf(retErr, fmt.Sprintf("(could not close image: %v) ", err))
        }
    }()

    if err := retry.RetryIfNecessary(ctx, func() error {
        rawManifest, _, err = src.GetManifest(ctx, nil)
        return err
    }, opts.retryOpts); err != nil {
        return errors.Wrapf(err, "Error retrieving manifest for image")
    }

    if opts.raw && !opts.config {
        _, err := stdout.Write(rawManifest)
        if err != nil {
            return fmt.Errorf("Error writing manifest to standard output: %v", err)
        }

        return nil
    }

    img, err := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(src, nil))
    if err != nil {
        return errors.Wrapf(err, "Error parsing manifest for image")
    }

    if opts.config && opts.raw {
        var configBlob []byte
        if err := retry.RetryIfNecessary(ctx, func() error {
            configBlob, err = img.ConfigBlob(ctx)
            return err
        }, opts.retryOpts); err != nil {
            return errors.Wrapf(err, "Error reading configuration blob")
        }
        _, err = stdout.Write(configBlob)
        if err != nil {
            return errors.Wrapf(err, "Error writing configuration blob to standard output")
        }
        return nil
    } else if opts.config {
        var config *v1.Image
        if err := retry.RetryIfNecessary(ctx, func() error {
            config, err = img.OCIConfig(ctx)
            return err
        }, opts.retryOpts); err != nil {
            return errors.Wrapf(err, "Error reading OCI-formatted configuration data")
        }
        if report.IsJSON(opts.format) || opts.format == "" {
            var out []byte
            out, err = json.MarshalIndent(config, "", "    ")
            if err == nil {
                fmt.Fprintf(stdout, "%s\n", string(out))
            }
        } else {
            row := "{{range . }}" + report.NormalizeFormat(opts.format) + "{{end}}"
            data = append(data, config)
            err = printTmpl(row, data)
        }
        if err != nil {
            return errors.Wrapf(err, "Error writing OCI-formatted configuration data to standard output")
        }
        return nil
    }

    if err := retry.RetryIfNecessary(ctx, func() error {
        imgInspect, err = img.Inspect(ctx)
        return err
    }, opts.retryOpts); err != nil {
        return err
    }

    outputData := inspect.Output{
        Name: "", // Set below if DockerReference() is known
        Tag:  imgInspect.Tag,
        // Digest is set below.
        RepoTags:      []string{}, // Possibly overridden for docker.Transport.
        Created:       imgInspect.Created,
        DockerVersion: imgInspect.DockerVersion,
        Labels:        imgInspect.Labels,
        Architecture:  imgInspect.Architecture,
        Os:            imgInspect.Os,
        Layers:        imgInspect.Layers,
        Env:           imgInspect.Env,
    }
    outputData.Digest, err = manifest.Digest(rawManifest)
    if err != nil {
        return errors.Wrapf(err, "Error computing manifest digest")
    }
    if dockerRef := img.Reference().DockerReference(); dockerRef != nil {
        outputData.Name = dockerRef.Name()
    }
    if img.Reference().Transport() == docker.Transport {
        sys, err := opts.image.newSystemContext()
        if err != nil {
            return err
        }
        outputData.RepoTags, err = docker.GetRepositoryTags(ctx, sys, img.Reference())
        if err != nil {
            // some registries may decide to block the "list all tags" endpoint;
            // gracefully allow the inspect to continue in this case. Currently
            // the IBM Bluemix container registry has this restriction.
            // In addition, AWS ECR rejects it with 403 (Forbidden) if the "ecr:ListImages"
            // action is not allowed.
            if !strings.Contains(err.Error(), "401") && !strings.Contains(err.Error(), "403") {
                return errors.Wrapf(err, "Error determining repository tags")
            }
            logrus.Warnf("Registry disallows tag list retrieval; skipping")
        }
    }
    if report.IsJSON(opts.format) || opts.format == "" {
        out, err := json.MarshalIndent(outputData, "", "    ")
        if err == nil {
            fmt.Fprintf(stdout, "%s\n", string(out))
        }
        return err
    }
    row := "{{range . }}" + report.NormalizeFormat(opts.format) + "{{end}}"
    data = append(data, outputData)
    return printTmpl(row, data)
}

func inspectNormalize(row string) string {
    r := strings.NewReplacer(
        ".ImageID", ".Image",
    )
    return r.Replace(row)
}

func printTmpl(row string, data []interface{}) error {
    t, err := template.New("skopeo inspect").Parse(row)
    if err != nil {
        return err
    }
    w := tabwriter.NewWriter(os.Stdout, 8, 2, 2, ' ', 0)
    return t.Execute(w, data)
}
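The --format path above wraps the user-supplied template in a {{range .}} ... {{end}} row and executes it with text/template, as printTmpl does. A minimal standalone sketch of that same mechanism, independent of skopeo's helpers; the struct fields and values here are placeholders:

    // Hypothetical, self-contained illustration of the Go-template formatting used by --format.
    package main

    import (
        "os"
        "text/template"
    )

    func main() {
        data := []interface{}{
            struct{ Name, Digest string }{"registry.example.com/app", "sha256:<placeholder>"},
        }
        row := "{{range .}}Name: {{.Name}} Digest: {{.Digest}}\n{{end}}"
        t := template.Must(template.New("skopeo inspect").Parse(row))
        if err := t.Execute(os.Stdout, data); err != nil {
            panic(err)
        }
    }

The real command additionally runs the format string through report.NormalizeFormat before parsing it.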
cmd/skopeo/inspect/output.go (new file, 23 lines)
@@ -0,0 +1,23 @@
package inspect

import (
    "time"

    digest "github.com/opencontainers/go-digest"
)

// Output is the output format of (skopeo inspect),
// primarily so that we can format it with a simple json.MarshalIndent.
type Output struct {
    Name          string `json:",omitempty"`
    Tag           string `json:",omitempty"`
    Digest        digest.Digest
    RepoTags      []string
    Created       *time.Time
    DockerVersion string
    Labels        map[string]string
    Architecture  string
    Os            string
    Layers        []string
    Env           []string
}
cmd/skopeo/layers.go (new file, 175 lines)
@@ -0,0 +1,175 @@
package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "strings"

    "github.com/containers/common/pkg/retry"
    "github.com/containers/image/v5/directory"
    "github.com/containers/image/v5/image"
    "github.com/containers/image/v5/pkg/blobinfocache"
    "github.com/containers/image/v5/types"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

type layersOptions struct {
    global    *globalOptions
    image     *imageOptions
    retryOpts *retry.RetryOptions
}

func layersCmd(global *globalOptions) *cobra.Command {
    sharedFlags, sharedOpts := sharedImageFlags()
    imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
    retryFlags, retryOpts := retryFlags()
    opts := layersOptions{
        global:    global,
        image:     imageOpts,
        retryOpts: retryOpts,
    }
    cmd := &cobra.Command{
        Hidden: true,
        Use:    "layers [command options] IMAGE-NAME [LAYER...]",
        Short:  "Get layers of IMAGE-NAME",
        RunE:   commandAction(opts.run),
    }
    adjustUsage(cmd)
    flags := cmd.Flags()
    flags.AddFlagSet(&sharedFlags)
    flags.AddFlagSet(&imageFlags)
    flags.AddFlagSet(&retryFlags)
    return cmd
}

func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
    fmt.Fprintln(os.Stderr, `DEPRECATED: skopeo layers is deprecated in favor of skopeo copy`)
    if len(args) == 0 {
        return errors.New("Usage: layers imageReference [layer...]")
    }
    imageName := args[0]

    if err := reexecIfNecessaryForImages(imageName); err != nil {
        return err
    }

    ctx, cancel := opts.global.commandTimeoutContext()
    defer cancel()

    sys, err := opts.image.newSystemContext()
    if err != nil {
        return err
    }
    cache := blobinfocache.DefaultCache(sys)
    var (
        rawSource types.ImageSource
        src       types.ImageCloser
    )
    if err = retry.RetryIfNecessary(ctx, func() error {
        rawSource, err = parseImageSource(ctx, opts.image, imageName)
        return err
    }, opts.retryOpts); err != nil {
        return err
    }
    if err = retry.RetryIfNecessary(ctx, func() error {
        src, err = image.FromSource(ctx, sys, rawSource)
        return err
    }, opts.retryOpts); err != nil {
        if closeErr := rawSource.Close(); closeErr != nil {
            return errors.Wrapf(err, " (close error: %v)", closeErr)
        }

        return err
    }
    defer func() {
        if err := src.Close(); err != nil {
            retErr = errors.Wrapf(retErr, " (close error: %v)", err)
        }
    }()

    type blobDigest struct {
        digest   digest.Digest
        isConfig bool
    }
    var blobDigests []blobDigest
    for _, dString := range args[1:] {
        if !strings.HasPrefix(dString, "sha256:") {
            dString = "sha256:" + dString
        }
        d, err := digest.Parse(dString)
        if err != nil {
            return err
        }
        blobDigests = append(blobDigests, blobDigest{digest: d, isConfig: false})
    }

    if len(blobDigests) == 0 {
        layers := src.LayerInfos()
        seenLayers := map[digest.Digest]struct{}{}
        for _, info := range layers {
            if _, ok := seenLayers[info.Digest]; !ok {
                blobDigests = append(blobDigests, blobDigest{digest: info.Digest, isConfig: false})
                seenLayers[info.Digest] = struct{}{}
            }
        }
        configInfo := src.ConfigInfo()
        if configInfo.Digest != "" {
            blobDigests = append(blobDigests, blobDigest{digest: configInfo.Digest, isConfig: true})
        }
    }

    tmpDir, err := ioutil.TempDir(".", "layers-")
    if err != nil {
        return err
    }
    tmpDirRef, err := directory.NewReference(tmpDir)
    if err != nil {
        return err
    }
    dest, err := tmpDirRef.NewImageDestination(ctx, nil)
    if err != nil {
        return err
    }

    defer func() {
        if err := dest.Close(); err != nil {
            retErr = errors.Wrapf(retErr, " (close error: %v)", err)
        }
    }()

    for _, bd := range blobDigests {
        var (
            r        io.ReadCloser
            blobSize int64
        )
        if err = retry.RetryIfNecessary(ctx, func() error {
            r, blobSize, err = rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1}, cache)
            return err
        }, opts.retryOpts); err != nil {
            return err
        }
        if _, err := dest.PutBlob(ctx, r, types.BlobInfo{Digest: bd.digest, Size: blobSize}, cache, bd.isConfig); err != nil {
            if closeErr := r.Close(); closeErr != nil {
                return errors.Wrapf(err, " (close error: %v)", closeErr)
            }
            return err
        }
    }

    var manifest []byte
    if err = retry.RetryIfNecessary(ctx, func() error {
        manifest, _, err = src.Manifest(ctx)
        return err
    }, opts.retryOpts); err != nil {
        return err
    }
    if err := dest.PutManifest(ctx, manifest, nil); err != nil {
        return err
    }

    return dest.Commit(ctx, image.UnparsedInstance(rawSource, nil))
}
cmd/skopeo/list_tags.go (new file, 147 lines)
@@ -0,0 +1,147 @@
package main

import (
    "context"
    "encoding/json"
    "fmt"
    "io"
    "strings"

    "github.com/containers/common/pkg/retry"
    "github.com/containers/image/v5/docker"
    "github.com/containers/image/v5/docker/reference"
    "github.com/containers/image/v5/transports/alltransports"
    "github.com/containers/image/v5/types"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

// tagListOutput is the output format of (skopeo list-tags), primarily so that we can format it with a simple json.MarshalIndent.
type tagListOutput struct {
    Repository string
    Tags       []string
}

type tagsOptions struct {
    global    *globalOptions
    image     *imageOptions
    retryOpts *retry.RetryOptions
}

func tagsCmd(global *globalOptions) *cobra.Command {
    sharedFlags, sharedOpts := sharedImageFlags()
    imageFlags, imageOpts := dockerImageFlags(global, sharedOpts, "", "")
    retryFlags, retryOpts := retryFlags()

    opts := tagsOptions{
        global:    global,
        image:     imageOpts,
        retryOpts: retryOpts,
    }
    cmd := &cobra.Command{
        Use:   "list-tags [command options] REPOSITORY-NAME",
        Short: "List tags in the transport/repository specified by the REPOSITORY-NAME",
        Long: `Return the list of tags from the transport/repository "REPOSITORY-NAME"

Supported transports:
docker

See skopeo-list-tags(1) section "REPOSITORY NAMES" for the expected format
`,
        RunE:    commandAction(opts.run),
        Example: `skopeo list-tags docker://docker.io/fedora`,
    }
    adjustUsage(cmd)
    flags := cmd.Flags()
    flags.AddFlagSet(&sharedFlags)
    flags.AddFlagSet(&imageFlags)
    flags.AddFlagSet(&retryFlags)
    return cmd
}

// parseDockerRepositoryReference is a customized version of alltransports.ParseImageName and docker.ParseReference that does not place a default tag in the reference.
// Would really love to not have this, but it is needed to enforce tag-less and digest-less names.
func parseDockerRepositoryReference(refString string) (types.ImageReference, error) {
    if !strings.HasPrefix(refString, docker.Transport.Name()+"://") {
        return nil, errors.Errorf("docker: image reference %s does not start with %s://", refString, docker.Transport.Name())
    }

    parts := strings.SplitN(refString, ":", 2)
    if len(parts) != 2 {
        return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, refString)
    }

    ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(parts[1], "//"))
    if err != nil {
        return nil, err
    }

    if !reference.IsNameOnly(ref) {
        return nil, errors.New(`No tag or digest allowed in reference`)
    }

    // Checks ok, now return a reference. This is a hack because the tag listing code expects a full image reference even though the tag is ignored
    return docker.NewReference(reference.TagNameOnly(ref))
}

// listDockerTags lists the tags of the repository contained in the imgRef reference. Any tag value in the reference is ignored.
func listDockerTags(ctx context.Context, sys *types.SystemContext, imgRef types.ImageReference) (string, []string, error) {
    repositoryName := imgRef.DockerReference().Name()

    tags, err := docker.GetRepositoryTags(ctx, sys, imgRef)
    if err != nil {
        return ``, nil, fmt.Errorf("Error listing repository tags: %v", err)
    }
    return repositoryName, tags, nil
}

func (opts *tagsOptions) run(args []string, stdout io.Writer) (retErr error) {
    ctx, cancel := opts.global.commandTimeoutContext()
    defer cancel()

    if len(args) != 1 {
        return errorShouldDisplayUsage{errors.New("Exactly one non-option argument expected")}
    }

    sys, err := opts.image.newSystemContext()
    if err != nil {
        return err
    }

    transport := alltransports.TransportFromImageName(args[0])
    if transport == nil {
        return fmt.Errorf("Invalid %q: does not specify a transport", args[0])
    }

    if transport.Name() != docker.Transport.Name() {
        return fmt.Errorf("Unsupported transport '%v' for tag listing. Only '%v' currently supported", transport.Name(), docker.Transport.Name())
    }

    // Do transport-specific parsing and validation to get an image reference
    imgRef, err := parseDockerRepositoryReference(args[0])
    if err != nil {
        return err
    }

    var repositoryName string
    var tagListing []string
    if err = retry.RetryIfNecessary(ctx, func() error {
        repositoryName, tagListing, err = listDockerTags(ctx, sys, imgRef)
        return err
    }, opts.retryOpts); err != nil {
        return err
    }

    outputData := tagListOutput{
        Repository: repositoryName,
        Tags:       tagListing,
    }

    out, err := json.MarshalIndent(outputData, "", "    ")
    if err != nil {
        return err
    }
    _, err = fmt.Fprintf(stdout, "%s\n", string(out))

    return err
}
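A minimal sketch of how the two helpers above compose outside the CLI wiring. exampleListTags and the repository URL are hypothetical names introduced here for illustration; error handling is shortened:

    // Hypothetical caller within this package; the repository URL is a placeholder.
    func exampleListTags(ctx context.Context, sys *types.SystemContext) error {
        imgRef, err := parseDockerRepositoryReference("docker://registry.example.com/myorg/myapp")
        if err != nil {
            return err // e.g. the reference carried a tag or digest, which list-tags rejects
        }
        repo, tags, err := listDockerTags(ctx, sys, imgRef)
        if err != nil {
            return err
        }
        fmt.Printf("%s has %d tags\n", repo, len(tags))
        return nil
    }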
cmd/skopeo/list_tags_test.go (new file, 57 lines)
@@ -0,0 +1,57 @@
package main

import (
    "testing"

    "github.com/containers/image/v5/transports/alltransports"
    "github.com/stretchr/testify/assert"
)

// Tests the kinds of inputs allowed and expected by the command
func TestDockerRepositoryReferenceParser(t *testing.T) {
    for _, test := range [][]string{
        {"docker://myhost.com:1000/nginx"}, // no tag
        {"docker://myhost.com/nginx"},      // no port or tag
        {"docker://somehost.com"},          // Valid default expansion
        {"docker://nginx"},                 // Valid default expansion
    } {
        ref, err := parseDockerRepositoryReference(test[0])
        expected, err2 := alltransports.ParseImageName(test[0])
        if assert.NoError(t, err, "Could not parse, got error on %v", test[0]) && assert.NoError(t, err2, "Could not parse with the regular parser, got error on %v", test[0]) {
            assert.Equal(t, expected.DockerReference().Name(), ref.DockerReference().Name(), "Mismatched parse result for input %v", test[0])
        }
    }

    for _, test := range [][]string{
        {"oci://somedir"},
        {"dir:/somepath"},
        {"docker-archive:/tmp/dir"},
        {"container-storage:myhost.com/someimage"},
        {"docker-daemon:myhost.com/someimage"},
        {"docker://myhost.com:1000/nginx:foobar:foobar"},            // Invalid repository ref
        {"docker://somehost.com:5000/"},                             // no repo
        {"docker://myhost.com:1000/nginx:latest"},                   // tag not allowed
        {"docker://myhost.com:1000/nginx@sha256:abcdef1234567890"},  // digest not allowed
    } {
        _, err := parseDockerRepositoryReference(test[0])
        assert.Error(t, err, test[0])
    }
}

func TestDockerRepositoryReferenceParserDrift(t *testing.T) {
    for _, test := range [][]string{
        {"docker://myhost.com:1000/nginx", "myhost.com:1000/nginx"}, // no tag
        {"docker://myhost.com/nginx", "myhost.com/nginx"},           // no port or tag
        {"docker://somehost.com", "docker.io/library/somehost.com"}, // Valid default expansion
        {"docker://nginx", "docker.io/library/nginx"},               // Valid default expansion
    } {
        ref, err := parseDockerRepositoryReference(test[0])
        ref2, err2 := alltransports.ParseImageName(test[0])

        if assert.NoError(t, err, "Could not parse, got error on %v", test[0]) && assert.NoError(t, err2, "Could not parse with regular parser, got error on %v", test[0]) {
            assert.Equal(t, ref.DockerReference().String(), ref2.DockerReference().String(), "Different parsing output for input %v. Repo parse = %v, regular parser = %v", test[0], ref, ref2)
        }
    }
}
cmd/skopeo/login.go (new file, 47 lines)
@@ -0,0 +1,47 @@
package main

import (
    "io"
    "os"

    "github.com/containers/common/pkg/auth"
    "github.com/containers/image/v5/types"
    "github.com/spf13/cobra"
)

type loginOptions struct {
    global    *globalOptions
    loginOpts auth.LoginOptions
    getLogin  optionalBool
    tlsVerify optionalBool
}

func loginCmd(global *globalOptions) *cobra.Command {
    opts := loginOptions{
        global: global,
    }
    cmd := &cobra.Command{
        Use:     "login",
        Short:   "Login to a container registry",
        Long:    "Login to a container registry on a specified server.",
        RunE:    commandAction(opts.run),
        Example: `skopeo login quay.io`,
    }
    adjustUsage(cmd)
    flags := cmd.Flags()
    optionalBoolFlag(flags, &opts.tlsVerify, "tls-verify", "require HTTPS and verify certificates when accessing the registry")
    flags.AddFlagSet(auth.GetLoginFlags(&opts.loginOpts))
    return cmd
}

func (opts *loginOptions) run(args []string, stdout io.Writer) error {
    ctx, cancel := opts.global.commandTimeoutContext()
    defer cancel()
    opts.loginOpts.Stdout = stdout
    opts.loginOpts.Stdin = os.Stdin
    sys := opts.global.newSystemContext()
    if opts.tlsVerify.present {
        sys.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.value)
    }
    return auth.Login(ctx, sys, &opts.loginOpts, args)
}
cmd/skopeo/logout.go (new file, 35 lines)
@@ -0,0 +1,35 @@
package main

import (
    "io"

    "github.com/containers/common/pkg/auth"
    "github.com/spf13/cobra"
)

type logoutOptions struct {
    global     *globalOptions
    logoutOpts auth.LogoutOptions
}

func logoutCmd(global *globalOptions) *cobra.Command {
    opts := logoutOptions{
        global: global,
    }
    cmd := &cobra.Command{
        Use:     "logout",
        Short:   "Logout of a container registry",
        Long:    "Logout of a container registry on a specified server.",
        RunE:    commandAction(opts.run),
        Example: `skopeo logout quay.io`,
    }
    adjustUsage(cmd)
    cmd.Flags().AddFlagSet(auth.GetLogoutFlags(&opts.logoutOpts))
    return cmd
}

func (opts *logoutOptions) run(args []string, stdout io.Writer) error {
    opts.logoutOpts.Stdout = stdout
    sys := opts.global.newSystemContext()
    return auth.Logout(sys, &opts.logoutOpts, args)
}
cmd/skopeo/main.go (new file, 155 lines)
@@ -0,0 +1,155 @@
package main

import (
    "context"
    "fmt"
    "time"

    "github.com/containers/image/v5/signature"
    "github.com/containers/image/v5/types"
    "github.com/containers/skopeo/version"
    "github.com/containers/storage/pkg/reexec"
    "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
)

// gitCommit will be the hash that the binary was built from
// and will be populated by the Makefile
var gitCommit = ""

var defaultUserAgent = "skopeo/" + version.Version

type globalOptions struct {
    debug              bool          // Enable debug output
    tlsVerify          optionalBool  // Require HTTPS and verify certificates (for docker: and docker-daemon:)
    policyPath         string        // Path to a signature verification policy file
    insecurePolicy     bool          // Use an "allow everything" signature verification policy
    registriesDirPath  string        // Path to a "registries.d" registry configuration directory
    overrideArch       string        // Architecture to use for choosing images, instead of the runtime one
    overrideOS         string        // OS to use for choosing images, instead of the runtime one
    overrideVariant    string        // Architecture variant to use for choosing images, instead of the runtime one
    commandTimeout     time.Duration // Timeout for the command execution
    registriesConfPath string        // Path to the "registries.conf" file
    tmpDir             string        // Path to use for big temporary files
}

// createApp returns a cobra.Command, and the underlying globalOptions object, to be run or tested.
func createApp() (*cobra.Command, *globalOptions) {
    opts := globalOptions{}

    rootCommand := &cobra.Command{
        Use:  "skopeo",
        Long: "Various operations with container images and container image registries",
        PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
            return opts.before(cmd)
        },
        SilenceUsage:  true,
        SilenceErrors: true,
    }
    if gitCommit != "" {
        rootCommand.Version = fmt.Sprintf("%s commit: %s", version.Version, gitCommit)
    } else {
        rootCommand.Version = version.Version
    }
    // Override default `--version` global flag to enable `-v` shorthand
    var dummyVersion bool
    rootCommand.Flags().BoolVarP(&dummyVersion, "version", "v", false, "Version for Skopeo")
    rootCommand.PersistentFlags().BoolVar(&opts.debug, "debug", false, "enable debug output")
    flag := optionalBoolFlag(rootCommand.PersistentFlags(), &opts.tlsVerify, "tls-verify", "Require HTTPS and verify certificates when accessing the registry")
    flag.Hidden = true
    rootCommand.PersistentFlags().StringVar(&opts.policyPath, "policy", "", "Path to a trust policy file")
    rootCommand.PersistentFlags().BoolVar(&opts.insecurePolicy, "insecure-policy", false, "run the tool without any policy check")
    rootCommand.PersistentFlags().StringVar(&opts.registriesDirPath, "registries.d", "", "use registry configuration files in `DIR` (e.g. for container signature storage)")
    rootCommand.PersistentFlags().StringVar(&opts.overrideArch, "override-arch", "", "use `ARCH` instead of the architecture of the machine for choosing images")
    rootCommand.PersistentFlags().StringVar(&opts.overrideOS, "override-os", "", "use `OS` instead of the running OS for choosing images")
    rootCommand.PersistentFlags().StringVar(&opts.overrideVariant, "override-variant", "", "use `VARIANT` instead of the running architecture variant for choosing images")
    rootCommand.PersistentFlags().DurationVar(&opts.commandTimeout, "command-timeout", 0, "timeout for the command execution")
    rootCommand.PersistentFlags().StringVar(&opts.registriesConfPath, "registries-conf", "", "path to the registries.conf file")
    if err := rootCommand.PersistentFlags().MarkHidden("registries-conf"); err != nil {
        logrus.Fatal("unable to mark registries-conf flag as hidden")
    }
    rootCommand.PersistentFlags().StringVar(&opts.tmpDir, "tmpdir", "", "directory used to store temporary files")
    rootCommand.AddCommand(
        copyCmd(&opts),
        deleteCmd(&opts),
        inspectCmd(&opts),
        layersCmd(&opts),
        loginCmd(&opts),
        logoutCmd(&opts),
        manifestDigestCmd(),
        syncCmd(&opts),
        standaloneSignCmd(),
        standaloneVerifyCmd(),
        tagsCmd(&opts),
        untrustedSignatureDumpCmd(),
    )
    return rootCommand, &opts
}

// before is run as a persistent pre-run hook for any command, before running the command-specific handler.
func (opts *globalOptions) before(cmd *cobra.Command) error {
    if opts.debug {
        logrus.SetLevel(logrus.DebugLevel)
    }
    if opts.tlsVerify.present {
        logrus.Warn("'--tls-verify' is deprecated, please set this on the specific subcommand")
    }
    return nil
}

func main() {
    if reexec.Init() {
        return
    }
    rootCmd, _ := createApp()
    if err := rootCmd.Execute(); err != nil {
        logrus.Fatal(err)
    }
}

// getPolicyContext returns a *signature.PolicyContext based on opts.
func (opts *globalOptions) getPolicyContext() (*signature.PolicyContext, error) {
    var policy *signature.Policy // This could be cached across calls in opts.
    var err error
    if opts.insecurePolicy {
        policy = &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
    } else if opts.policyPath == "" {
        policy, err = signature.DefaultPolicy(nil)
    } else {
        policy, err = signature.NewPolicyFromFile(opts.policyPath)
    }
    if err != nil {
        return nil, err
    }
    return signature.NewPolicyContext(policy)
}

// commandTimeoutContext returns a context.Context and a cancellation callback based on opts.
// The caller should usually "defer cancel()" immediately after calling this.
func (opts *globalOptions) commandTimeoutContext() (context.Context, context.CancelFunc) {
    ctx := context.Background()
    var cancel context.CancelFunc = func() {}
    if opts.commandTimeout > 0 {
        ctx, cancel = context.WithTimeout(ctx, opts.commandTimeout)
    }
    return ctx, cancel
}

// newSystemContext returns a *types.SystemContext corresponding to opts.
// It is guaranteed to return a fresh instance, so it is safe to make additional updates to it.
func (opts *globalOptions) newSystemContext() *types.SystemContext {
    ctx := &types.SystemContext{
        RegistriesDirPath:        opts.registriesDirPath,
        ArchitectureChoice:       opts.overrideArch,
        OSChoice:                 opts.overrideOS,
        VariantChoice:            opts.overrideVariant,
        SystemRegistriesConfPath: opts.registriesConfPath,
        BigFilesTemporaryDir:     opts.tmpDir,
        DockerRegistryUserAgent:  defaultUserAgent,
    }
    // DEPRECATED: We support this for backward compatibility, but override it if a per-image flag is provided.
    if opts.tlsVerify.present {
        ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.value)
    }
    return ctx
}
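Every subcommand registered in createApp follows the same shape: an options struct holding the parsed flags, a run(args, stdout) method, and a commandAction adapter (defined elsewhere in this package, outside this listing) that bridges run to cobra's RunE. A sketch of that pattern under those assumptions; exampleOptions and exampleCmd are illustrative names, not part of this commit:

    // Hypothetical subcommand following the pattern used by the commands above.
    type exampleOptions struct {
        global *globalOptions
    }

    func exampleCmd(global *globalOptions) *cobra.Command {
        opts := exampleOptions{global: global}
        cmd := &cobra.Command{
            Use:   "example",
            Short: "Illustrates the options-struct + commandAction pattern",
            // commandAction is assumed to adapt run(args, stdout) to cobra's RunE signature.
            RunE: commandAction(opts.run),
        }
        adjustUsage(cmd)
        return cmd
    }

    func (opts *exampleOptions) run(args []string, stdout io.Writer) error {
        ctx, cancel := opts.global.commandTimeoutContext() // honors --command-timeout
        defer cancel()
        _ = ctx // a real command would pass ctx to containers/image operations
        fmt.Fprintln(stdout, "example ran")
        return nil
    }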
cmd/skopeo/main_test.go (new file, 51 lines)
@@ -0,0 +1,51 @@
package main

import (
    "bytes"
    "testing"

    "github.com/containers/image/v5/types"
    "github.com/stretchr/testify/assert"
)

// runSkopeo creates an app object and runs it with args, with an implied first "skopeo".
// Returns output intended for stdout and the returned error, if any.
func runSkopeo(args ...string) (string, error) {
    app, _ := createApp()
    stdout := bytes.Buffer{}
    app.SetOut(&stdout)
    app.SetArgs(args)
    err := app.Execute()
    return stdout.String(), err
}

func TestGlobalOptionsNewSystemContext(t *testing.T) {
    // Default state
    opts, _ := fakeGlobalOptions(t, []string{})
    res := opts.newSystemContext()
    assert.Equal(t, &types.SystemContext{
        // User-Agent is set by default.
        DockerRegistryUserAgent: defaultUserAgent,
    }, res)
    // Set everything to non-default values.
    opts, _ = fakeGlobalOptions(t, []string{
        "--registries.d", "/srv/registries.d",
        "--override-arch", "overridden-arch",
        "--override-os", "overridden-os",
        "--override-variant", "overridden-variant",
        "--tmpdir", "/srv",
        "--registries-conf", "/srv/registries.conf",
        "--tls-verify=false",
    })
    res = opts.newSystemContext()
    assert.Equal(t, &types.SystemContext{
        RegistriesDirPath:           "/srv/registries.d",
        ArchitectureChoice:          "overridden-arch",
        OSChoice:                    "overridden-os",
        VariantChoice:               "overridden-variant",
        BigFilesTemporaryDir:        "/srv",
        SystemRegistriesConfPath:    "/srv/registries.conf",
        DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
        DockerRegistryUserAgent:     defaultUserAgent,
    }, res)
}
cmd/skopeo/manifest.go (new file, 44 lines)
@@ -0,0 +1,44 @@
package main

import (
    "errors"
    "fmt"
    "io"
    "io/ioutil"

    "github.com/containers/image/v5/manifest"
    "github.com/spf13/cobra"
)

type manifestDigestOptions struct {
}

func manifestDigestCmd() *cobra.Command {
    var opts manifestDigestOptions
    cmd := &cobra.Command{
        Use:     "manifest-digest MANIFEST",
        Short:   "Compute a manifest digest of a file",
        RunE:    commandAction(opts.run),
        Example: "skopeo manifest-digest manifest.json",
    }
    adjustUsage(cmd)
    return cmd
}

func (opts *manifestDigestOptions) run(args []string, stdout io.Writer) error {
    if len(args) != 1 {
        return errors.New("Usage: skopeo manifest-digest manifest")
    }
    manifestPath := args[0]

    man, err := ioutil.ReadFile(manifestPath)
    if err != nil {
        return fmt.Errorf("Error reading manifest from %s: %v", manifestPath, err)
    }
    digest, err := manifest.Digest(man)
    if err != nil {
        return fmt.Errorf("Error computing digest: %v", err)
    }
    fmt.Fprintf(stdout, "%s\n", digest)
    return nil
}
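The command above is a thin CLI wrapper around containers/image's manifest.Digest, which is also what inspect.go uses to fill the Digest field. A minimal sketch of calling it directly; "manifest.json" is a placeholder path:

    // Hypothetical standalone program computing a manifest digest the same way the command does.
    package main

    import (
        "fmt"
        "io/ioutil"

        "github.com/containers/image/v5/manifest"
    )

    func main() {
        blob, err := ioutil.ReadFile("manifest.json") // placeholder path
        if err != nil {
            panic(err)
        }
        d, err := manifest.Digest(blob) // returns a digest.Digest such as sha256:...
        if err != nil {
            panic(err)
        }
        fmt.Println(d)
    }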
cmd/skopeo/manifest_test.go (new file, 31 lines)
@@ -0,0 +1,31 @@
package main

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestManifestDigest(t *testing.T) {
    // Invalid command-line arguments
    for _, args := range [][]string{
        {},
        {"a1", "a2"},
    } {
        out, err := runSkopeo(append([]string{"manifest-digest"}, args...)...)
        assertTestFailed(t, out, err, "Usage")
    }

    // Error reading manifest
    out, err := runSkopeo("manifest-digest", "/this/does/not/exist")
    assertTestFailed(t, out, err, "/this/does/not/exist")

    // Error computing manifest
    out, err = runSkopeo("manifest-digest", "fixtures/v2s1-invalid-signatures.manifest.json")
    assertTestFailed(t, out, err, "computing digest")

    // Success
    out, err = runSkopeo("manifest-digest", "fixtures/image.manifest.json")
    assert.NoError(t, err)
    assert.Equal(t, fixturesTestImageManifestDigest.String()+"\n", out)
}
cmd/skopeo/signing.go (new file, 148 lines)
@@ -0,0 +1,148 @@
package main

import (
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "io/ioutil"

    "github.com/containers/image/v5/signature"
    "github.com/spf13/cobra"
)

type standaloneSignOptions struct {
    output string // Output file path
}

func standaloneSignCmd() *cobra.Command {
    opts := standaloneSignOptions{}
    cmd := &cobra.Command{
        Use:   "standalone-sign [command options] MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT",
        Short: "Create a signature using local files",
        RunE:  commandAction(opts.run),
    }
    adjustUsage(cmd)
    flags := cmd.Flags()
    flags.StringVarP(&opts.output, "output", "o", "", "output the signature to `SIGNATURE`")
    return cmd
}

func (opts *standaloneSignOptions) run(args []string, stdout io.Writer) error {
    if len(args) != 3 || opts.output == "" {
        return errors.New("Usage: skopeo standalone-sign manifest docker-reference key-fingerprint -o signature")
    }
    manifestPath := args[0]
    dockerReference := args[1]
    fingerprint := args[2]

    manifest, err := ioutil.ReadFile(manifestPath)
    if err != nil {
        return fmt.Errorf("Error reading %s: %v", manifestPath, err)
    }

    mech, err := signature.NewGPGSigningMechanism()
    if err != nil {
        return fmt.Errorf("Error initializing GPG: %v", err)
    }
    defer mech.Close()
    signature, err := signature.SignDockerManifest(manifest, dockerReference, mech, fingerprint)
    if err != nil {
        return fmt.Errorf("Error creating signature: %v", err)
    }

    if err := ioutil.WriteFile(opts.output, signature, 0644); err != nil {
        return fmt.Errorf("Error writing signature to %s: %v", opts.output, err)
    }
    return nil
}

type standaloneVerifyOptions struct {
}

func standaloneVerifyCmd() *cobra.Command {
    opts := standaloneVerifyOptions{}
    cmd := &cobra.Command{
        Use:   "standalone-verify MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT SIGNATURE",
        Short: "Verify a signature using local files",
        RunE:  commandAction(opts.run),
    }
    adjustUsage(cmd)
    return cmd
}

func (opts *standaloneVerifyOptions) run(args []string, stdout io.Writer) error {
    if len(args) != 4 {
        return errors.New("Usage: skopeo standalone-verify manifest docker-reference key-fingerprint signature")
    }
    manifestPath := args[0]
    expectedDockerReference := args[1]
    expectedFingerprint := args[2]
    signaturePath := args[3]

    unverifiedManifest, err := ioutil.ReadFile(manifestPath)
    if err != nil {
        return fmt.Errorf("Error reading manifest from %s: %v", manifestPath, err)
    }
    unverifiedSignature, err := ioutil.ReadFile(signaturePath)
    if err != nil {
        return fmt.Errorf("Error reading signature from %s: %v", signaturePath, err)
    }

    mech, err := signature.NewGPGSigningMechanism()
    if err != nil {
        return fmt.Errorf("Error initializing GPG: %v", err)
    }
    defer mech.Close()
    sig, err := signature.VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest, expectedDockerReference, mech, expectedFingerprint)
    if err != nil {
        return fmt.Errorf("Error verifying signature: %v", err)
    }

    fmt.Fprintf(stdout, "Signature verified, digest %s\n", sig.DockerManifestDigest)
    return nil
}

// WARNING: Do not use the contents of this for ANY security decisions,
// and be VERY CAREFUL about showing this information to humans in any way which suggest that these values “are probably” reliable.
// There is NO REASON to expect the values to be correct, or not intentionally misleading
// (including things like “✅ Verified by $authority”)
//
// The subcommand is undocumented, and it may be renamed or entirely disappear in the future.
type untrustedSignatureDumpOptions struct {
}

func untrustedSignatureDumpCmd() *cobra.Command {
    opts := untrustedSignatureDumpOptions{}
    cmd := &cobra.Command{
        Use:    "untrusted-signature-dump-without-verification SIGNATURE",
        Short:  "Dump contents of a signature WITHOUT VERIFYING IT",
        RunE:   commandAction(opts.run),
        Hidden: true,
    }
    adjustUsage(cmd)
    return cmd
}

func (opts *untrustedSignatureDumpOptions) run(args []string, stdout io.Writer) error {
    if len(args) != 1 {
        return errors.New("Usage: skopeo untrusted-signature-dump-without-verification signature")
    }
    untrustedSignaturePath := args[0]

    untrustedSignature, err := ioutil.ReadFile(untrustedSignaturePath)
    if err != nil {
        return fmt.Errorf("Error reading untrusted signature from %s: %v", untrustedSignaturePath, err)
    }

    untrustedInfo, err := signature.GetUntrustedSignatureInformationWithoutVerifying(untrustedSignature)
    if err != nil {
        return fmt.Errorf("Error decoding untrusted signature: %v", err)
    }
    untrustedOut, err := json.MarshalIndent(untrustedInfo, "", "    ")
    if err != nil {
        return err
    }
    fmt.Fprintln(stdout, string(untrustedOut))
    return nil
}
178
cmd/skopeo/signing_test.go
Normal file
178
cmd/skopeo/signing_test.go
Normal file
@@ -0,0 +1,178 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/v5/signature"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const (
|
||||
// fixturesTestImageManifestDigest is the Docker manifest digest of "image.manifest.json"
|
||||
fixturesTestImageManifestDigest = digest.Digest("sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55")
|
||||
// fixturesTestKeyFingerprint is the fingerprint of the private key.
|
||||
fixturesTestKeyFingerprint = "1D8230F6CDB6A06716E414C1DB72F2188BB46CC8"
|
||||
// fixturesTestKeyFingerprint is the key ID of the private key.
|
||||
fixturesTestKeyShortID = "DB72F2188BB46CC8"
|
||||
)
|
||||
|
||||
// Test that results of runSkopeo failed with nothing on stdout, and substring
|
||||
// within the error message.
|
||||
func assertTestFailed(t *testing.T, stdout string, err error, substring string) {
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, stdout)
|
||||
assert.Contains(t, err.Error(), substring)
|
||||
}
|
||||
|
||||
func TestStandaloneSign(t *testing.T) {
|
||||
mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
|
||||
require.NoError(t, err)
|
||||
defer mech.Close()
|
||||
if err := mech.SupportsSigning(); err != nil {
|
||||
t.Skipf("Signing not supported: %v", err)
|
||||
}
|
||||
|
||||
manifestPath := "fixtures/image.manifest.json"
|
||||
dockerReference := "testing/manifest"
|
||||
os.Setenv("GNUPGHOME", "fixtures")
|
||||
defer os.Unsetenv("GNUPGHOME")
|
||||
|
||||
// Invalid command-line arguments
|
||||
for _, args := range [][]string{
|
||||
{},
|
||||
{"a1", "a2"},
|
||||
{"a1", "a2", "a3"},
|
||||
{"a1", "a2", "a3", "a4"},
|
||||
{"-o", "o", "a1", "a2"},
|
||||
{"-o", "o", "a1", "a2", "a3", "a4"},
|
||||
} {
|
||||
out, err := runSkopeo(append([]string{"standalone-sign"}, args...)...)
|
||||
assertTestFailed(t, out, err, "Usage")
|
||||
}
|
||||
|
||||
// Error reading manifest
|
||||
out, err := runSkopeo("standalone-sign", "-o", "/dev/null",
|
||||
"/this/does/not/exist", dockerReference, fixturesTestKeyFingerprint)
|
||||
assertTestFailed(t, out, err, "/this/does/not/exist")
|
||||
|
||||
// Invalid Docker reference
|
||||
out, err = runSkopeo("standalone-sign", "-o", "/dev/null",
|
||||
manifestPath, "" /* empty reference */, fixturesTestKeyFingerprint)
|
||||
assertTestFailed(t, out, err, "empty signature content")
|
||||
|
||||
// Unknown key.
|
||||
out, err = runSkopeo("standalone-sign", "-o", "/dev/null",
|
||||
manifestPath, dockerReference, "UNKNOWN GPG FINGERPRINT")
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, out)
|
||||
|
||||
// Error writing output
|
||||
out, err = runSkopeo("standalone-sign", "-o", "/dev/full",
|
||||
manifestPath, dockerReference, fixturesTestKeyFingerprint)
|
||||
assertTestFailed(t, out, err, "/dev/full")
|
||||
|
||||
// Success
|
||||
sigOutput, err := ioutil.TempFile("", "sig")
|
||||
require.NoError(t, err)
|
||||
defer os.Remove(sigOutput.Name())
|
||||
out, err = runSkopeo("standalone-sign", "-o", sigOutput.Name(),
|
||||
manifestPath, dockerReference, fixturesTestKeyFingerprint)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, out)
|
||||
|
||||
sig, err := ioutil.ReadFile(sigOutput.Name())
|
||||
require.NoError(t, err)
|
||||
manifest, err := ioutil.ReadFile(manifestPath)
|
||||
require.NoError(t, err)
|
||||
mech, err = signature.NewGPGSigningMechanism()
|
||||
require.NoError(t, err)
|
||||
defer mech.Close()
|
||||
verified, err := signature.VerifyDockerManifestSignature(sig, manifest, dockerReference, mech, fixturesTestKeyFingerprint)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, dockerReference, verified.DockerReference)
|
||||
assert.Equal(t, fixturesTestImageManifestDigest, verified.DockerManifestDigest)
|
||||
}
|
||||
|
||||
func TestStandaloneVerify(t *testing.T) {
|
||||
manifestPath := "fixtures/image.manifest.json"
|
||||
signaturePath := "fixtures/image.signature"
|
||||
dockerReference := "testing/manifest"
|
||||
os.Setenv("GNUPGHOME", "fixtures")
|
||||
defer os.Unsetenv("GNUPGHOME")
|
||||
|
||||
// Invalid command-line arguments
|
||||
for _, args := range [][]string{
|
||||
{},
|
||||
{"a1", "a2", "a3"},
|
||||
{"a1", "a2", "a3", "a4", "a5"},
|
||||
} {
|
||||
out, err := runSkopeo(append([]string{"standalone-verify"}, args...)...)
|
||||
assertTestFailed(t, out, err, "Usage")
|
||||
}
|
||||
|
||||
// Error reading manifest
|
||||
out, err := runSkopeo("standalone-verify", "/this/does/not/exist",
|
||||
dockerReference, fixturesTestKeyFingerprint, signaturePath)
|
||||
assertTestFailed(t, out, err, "/this/does/not/exist")
|
||||
|
||||
// Error reading signature
|
||||
out, err = runSkopeo("standalone-verify", manifestPath,
|
||||
dockerReference, fixturesTestKeyFingerprint, "/this/does/not/exist")
|
||||
assertTestFailed(t, out, err, "/this/does/not/exist")
|
||||
|
||||
// Error verifying signature
|
||||
out, err = runSkopeo("standalone-verify", manifestPath,
|
||||
dockerReference, fixturesTestKeyFingerprint, "fixtures/corrupt.signature")
|
||||
assertTestFailed(t, out, err, "Error verifying signature")
|
||||
|
||||
// Success
|
||||
out, err = runSkopeo("standalone-verify", manifestPath,
|
||||
dockerReference, fixturesTestKeyFingerprint, signaturePath)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "Signature verified, digest "+fixturesTestImageManifestDigest.String()+"\n", out)
|
||||
}
|
||||
|
||||
func TestUntrustedSignatureDump(t *testing.T) {
|
||||
// Invalid command-line arguments
|
||||
for _, args := range [][]string{
|
||||
{},
|
||||
{"a1", "a2"},
|
||||
{"a1", "a2", "a3", "a4"},
|
||||
} {
|
||||
out, err := runSkopeo(append([]string{"untrusted-signature-dump-without-verification"}, args...)...)
|
||||
assertTestFailed(t, out, err, "Usage")
|
||||
}
|
||||
|
||||
// Error reading manifest
|
||||
out, err := runSkopeo("untrusted-signature-dump-without-verification",
|
||||
"/this/does/not/exist")
|
||||
assertTestFailed(t, out, err, "/this/does/not/exist")
|
||||
|
||||
// Error reading signature (input is not a signature)
|
||||
out, err = runSkopeo("untrusted-signature-dump-without-verification", "fixtures/image.manifest.json")
|
||||
assertTestFailed(t, out, err, "Error decoding untrusted signature")
|
||||
|
||||
// Success
for _, path := range []string{"fixtures/image.signature", "fixtures/corrupt.signature"} {
out, err = runSkopeo("untrusted-signature-dump-without-verification", path)
|
||||
require.NoError(t, err)
|
||||
|
||||
var info signature.UntrustedSignatureInformation
|
||||
err := json.Unmarshal([]byte(out), &info)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, fixturesTestImageManifestDigest, info.UntrustedDockerManifestDigest)
|
||||
assert.Equal(t, "testing/manifest", info.UntrustedDockerReference)
|
||||
assert.NotNil(t, info.UntrustedCreatorID)
|
||||
assert.Equal(t, "atomic ", *info.UntrustedCreatorID)
|
||||
assert.NotNil(t, info.UntrustedTimestamp)
|
||||
assert.True(t, time.Unix(1458239713, 0).Equal(*info.UntrustedTimestamp))
|
||||
assert.Equal(t, fixturesTestKeyShortID, info.UntrustedShortKeyIdentifier)
|
||||
}
|
||||
}
610  cmd/skopeo/sync.go  Normal file
@@ -0,0 +1,610 @@
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/common/pkg/retry"
|
||||
"github.com/containers/image/v5/copy"
|
||||
"github.com/containers/image/v5/directory"
|
||||
"github.com/containers/image/v5/docker"
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/transports"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// syncOptions contains information retrieved from the skopeo sync command line.
|
||||
type syncOptions struct {
|
||||
global *globalOptions // Global (not command dependent) skopeo options
|
||||
srcImage *imageOptions // Source image options
|
||||
destImage *imageDestOptions // Destination image options
|
||||
retryOpts *retry.RetryOptions
|
||||
removeSignatures bool // Do not copy signatures from the source image
|
||||
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
|
||||
source string // Source repository name
|
||||
destination string // Destination registry name
|
||||
scoped bool // When true, namespace copied images at destination using the source repository name
|
||||
all bool // Copy all of the images if an image in the source is a list
|
||||
}
|
||||
|
||||
// repoDescriptor contains information of a single repository used as a sync source.
|
||||
type repoDescriptor struct {
|
||||
DirBasePath string // base path when source is 'dir'
|
||||
ImageRefs []types.ImageReference // List of tagged image references found for the repository
|
||||
Context *types.SystemContext // SystemContext for the sync command
|
||||
}
|
||||
|
||||
// tlsVerifyConfig is an implementation of the Unmarshaler interface, used to
// customize the unmarshaling behaviour of the tls-verify YAML key.
type tlsVerifyConfig struct {
|
||||
skip types.OptionalBool // skip TLS verification check (false by default)
|
||||
}
|
||||
|
||||
// registrySyncConfig contains information about a single registry, read from
|
||||
// the source YAML file
|
||||
type registrySyncConfig struct {
|
||||
Images map[string][]string // Maps image names to slices with the images' references (tags, digests)
ImagesByTagRegex map[string]string `yaml:"images-by-tag-regex"` // Maps image names to a regular expression matching the images' tags
|
||||
Credentials types.DockerAuthConfig // Username and password used to authenticate with the registry
|
||||
TLSVerify tlsVerifyConfig `yaml:"tls-verify"` // TLS verification mode (enabled by default)
|
||||
CertDir string `yaml:"cert-dir"` // Path to the TLS certificates of the registry
|
||||
}
|
||||
|
||||
// sourceConfig contains all registries information read from the source YAML file
|
||||
type sourceConfig map[string]registrySyncConfig
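// For orientation, a minimal sketch of a source YAML file that would unmarshal
// into sourceConfig; the registry host, repository names, tags and credentials
// below are illustrative placeholders, not values taken from this repository:
//
//   registry.example.com:
//     images:
//       busybox: []               # empty list: sync every tag in the repository
//       alpine: ["3.12", "edge"]  # sync only the listed tags or digests
//     images-by-tag-regex:
//       nginx: ^1\.19\..*         # sync tags matching the regular expression
//     credentials:
//       username: john
//       password: this is a secret
//     tls-verify: true
//     cert-dir: /home/john/certs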
|
||||
|
||||
func syncCmd(global *globalOptions) *cobra.Command {
|
||||
sharedFlags, sharedOpts := sharedImageFlags()
|
||||
srcFlags, srcOpts := dockerImageFlags(global, sharedOpts, "src-", "screds")
|
||||
destFlags, destOpts := dockerImageFlags(global, sharedOpts, "dest-", "dcreds")
|
||||
retryFlags, retryOpts := retryFlags()
|
||||
|
||||
opts := syncOptions{
|
||||
global: global,
|
||||
srcImage: srcOpts,
|
||||
destImage: &imageDestOptions{imageOptions: destOpts},
|
||||
retryOpts: retryOpts,
|
||||
}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "sync [command options] --src SOURCE-LOCATION --dest DESTINATION-LOCATION SOURCE DESTINATION",
|
||||
Short: "Synchronize one or more images from one location to another",
|
||||
Long: fmt.Sprint(`Copy all the images from a SOURCE to a DESTINATION.
|
||||
|
||||
Allowed SOURCE transports (specified with --src): docker, dir, yaml.
|
||||
Allowed DESTINATION transports (specified with --dest): docker, dir.
|
||||
|
||||
See skopeo-sync(1) for details.
|
||||
`),
|
||||
RunE: commandAction(opts.run),
|
||||
Example: `skopeo sync --src docker --dest dir --scoped registry.example.com/busybox /media/usb`,
|
||||
}
|
||||
adjustUsage(cmd)
|
||||
flags := cmd.Flags()
|
||||
flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE images")
|
||||
flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
|
||||
flags.StringVarP(&opts.source, "src", "s", "", "SOURCE transport type")
|
||||
flags.StringVarP(&opts.destination, "dest", "d", "", "DESTINATION transport type")
|
||||
flags.BoolVar(&opts.scoped, "scoped", false, "Images at DESTINATION are prefixed using the full source image path as scope")
|
||||
flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
|
||||
flags.AddFlagSet(&sharedFlags)
|
||||
flags.AddFlagSet(&srcFlags)
|
||||
flags.AddFlagSet(&destFlags)
|
||||
flags.AddFlagSet(&retryFlags)
|
||||
return cmd
|
||||
}
|
||||
|
||||
// UnmarshalYAML is the implementation of the Unmarshaler interface method
// for the tlsVerifyConfig type.
// It unmarshals the 'tls-verify' YAML key so that, when the key is not
// specified, TLS verification is enforced.
func (tls *tlsVerifyConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
var verify bool
|
||||
if err := unmarshal(&verify); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tls.skip = types.NewOptionalBool(!verify)
|
||||
return nil
|
||||
}
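// Illustrative mapping (assuming the yaml.v2 behaviour above): `tls-verify: false`
// yields skip == types.OptionalBoolTrue, `tls-verify: true` yields
// types.OptionalBoolFalse, and an absent key leaves skip at
// types.OptionalBoolUndefined, so TLS verification stays enforced.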
|
||||
|
||||
// newSourceConfig unmarshals the provided YAML file path to the sourceConfig type.
|
||||
// It returns a new unmarshaled sourceConfig object and any error encountered.
|
||||
func newSourceConfig(yamlFile string) (sourceConfig, error) {
|
||||
var cfg sourceConfig
|
||||
source, err := ioutil.ReadFile(yamlFile)
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
err = yaml.Unmarshal(source, &cfg)
|
||||
if err != nil {
|
||||
return cfg, errors.Wrapf(err, "Failed to unmarshal %q", yamlFile)
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// parseRepositoryReference parses input into a reference.Named, and verifies that it names a repository, not an image.
|
||||
func parseRepositoryReference(input string) (reference.Named, error) {
|
||||
ref, err := reference.ParseNormalizedNamed(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !reference.IsNameOnly(ref) {
|
||||
return nil, errors.Errorf("input names a reference, not a repository")
|
||||
}
|
||||
return ref, nil
|
||||
}
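// Illustrative examples (the registry and repository names are placeholders):
//   parseRepositoryReference("registry.example.com/ns/app")        -> accepted, names a repository
//   parseRepositoryReference("registry.example.com/ns/app:latest") -> rejected, names a reference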
|
||||
|
||||
// destinationReference creates an image reference using the provided transport.
|
||||
// It returns a image reference to be used as destination of an image copy and
|
||||
// any error encountered.
|
||||
func destinationReference(destination string, transport string) (types.ImageReference, error) {
|
||||
var imageTransport types.ImageTransport
|
||||
|
||||
switch transport {
|
||||
case docker.Transport.Name():
|
||||
destination = fmt.Sprintf("//%s", destination)
|
||||
imageTransport = docker.Transport
|
||||
case directory.Transport.Name():
|
||||
_, err := os.Stat(destination)
|
||||
if err == nil {
|
||||
return nil, errors.Errorf("Refusing to overwrite destination directory %q", destination)
|
||||
}
|
||||
if !os.IsNotExist(err) {
|
||||
return nil, errors.Wrap(err, "Destination directory could not be used")
|
||||
}
|
||||
// the directory holding the image must be created here
|
||||
if err = os.MkdirAll(destination, 0755); err != nil {
|
||||
return nil, errors.Wrapf(err, "Error creating directory for image %s", destination)
|
||||
}
|
||||
imageTransport = directory.Transport
|
||||
default:
|
||||
return nil, errors.Errorf("%q is not a valid destination transport", transport)
|
||||
}
|
||||
logrus.Debugf("Destination for transport %q: %s", transport, destination)
|
||||
|
||||
destRef, err := imageTransport.ParseReference(destination)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Cannot obtain a valid image reference for transport %q and reference %q", imageTransport.Name(), destination)
|
||||
}
|
||||
|
||||
return destRef, nil
|
||||
}
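// Illustrative behaviour (paths and names are placeholders): with transport
// "docker", destination "registry.example.com/ns/app" is parsed as
// "//registry.example.com/ns/app"; with transport "dir", destination
// "/media/usb/app" must not already exist and is created with os.MkdirAll.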
|
||||
|
||||
// getImageTags lists all tags in a repository.
|
||||
// It returns a string slice of tags and any error encountered.
|
||||
func getImageTags(ctx context.Context, sysCtx *types.SystemContext, repoRef reference.Named) ([]string, error) {
|
||||
name := repoRef.Name()
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"image": name,
|
||||
}).Info("Getting tags")
|
||||
// Ugly: NewReference rejects IsNameOnly references, and GetRepositoryTags ignores the tag/digest.
|
||||
// So, we use TagNameOnly here only to shut up NewReference
|
||||
dockerRef, err := docker.NewReference(reference.TagNameOnly(repoRef))
|
||||
if err != nil {
|
||||
return nil, err // Should never happen for a reference with tag and no digest
|
||||
}
|
||||
tags, err := docker.GetRepositoryTags(ctx, sysCtx, dockerRef)
|
||||
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
break
|
||||
case docker.ErrUnauthorizedForCredentials:
|
||||
// Some registries may decide to block the "list all tags" endpoint.
|
||||
// Gracefully allow the sync to continue in this case.
|
||||
logrus.Warnf("Registry disallows tag list retrieval: %s", err)
|
||||
break
|
||||
default:
|
||||
return tags, errors.Wrapf(err, "Error determining repository tags for image %s", name)
|
||||
}
|
||||
|
||||
return tags, nil
|
||||
}
|
||||
|
||||
// imagesToCopyFromRepo builds a list of image references from the tags
|
||||
// found in a source repository.
|
||||
// It returns an image reference slice with as many elements as the tags found
|
||||
// and any error encountered.
|
||||
func imagesToCopyFromRepo(sys *types.SystemContext, repoRef reference.Named) ([]types.ImageReference, error) {
|
||||
tags, err := getImageTags(context.Background(), sys, repoRef)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var sourceReferences []types.ImageReference
|
||||
for _, tag := range tags {
|
||||
taggedRef, err := reference.WithTag(repoRef, tag)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Error creating a reference for repository %s and tag %q", repoRef.Name(), tag)
|
||||
}
|
||||
ref, err := docker.NewReference(taggedRef)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Cannot obtain a valid image reference for transport %q and reference %s", docker.Transport.Name(), taggedRef.String())
|
||||
}
|
||||
sourceReferences = append(sourceReferences, ref)
|
||||
}
|
||||
return sourceReferences, nil
|
||||
}
|
||||
|
||||
// imagesToCopyFromDir builds a list of image references from the images found
// in the source directory.
// It returns an image reference slice with as many elements as the images found
// and any error encountered.
func imagesToCopyFromDir(dirPath string) ([]types.ImageReference, error) {
|
||||
var sourceReferences []types.ImageReference
|
||||
err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !info.IsDir() && info.Name() == "manifest.json" {
|
||||
dirname := filepath.Dir(path)
|
||||
ref, err := directory.Transport.ParseReference(dirname)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Cannot obtain a valid image reference for transport %q and reference %q", directory.Transport.Name(), dirname)
|
||||
}
|
||||
sourceReferences = append(sourceReferences, ref)
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return sourceReferences,
|
||||
errors.Wrapf(err, "Error walking the path %q", dirPath)
|
||||
}
|
||||
|
||||
return sourceReferences, nil
|
||||
}
|
||||
|
||||
// imagesToCopyFromRegistry builds a list of repository descriptors from the images
// in a registry configuration.
// It returns a repository descriptor slice with as many elements as the images
// found and any error encountered. Each element of the slice is a list of
// image references, to be used as sync source.
func imagesToCopyFromRegistry(registryName string, cfg registrySyncConfig, sourceCtx types.SystemContext) ([]repoDescriptor, error) {
|
||||
serverCtx := &sourceCtx
|
||||
// override ctx with per-registryName options
|
||||
serverCtx.DockerCertPath = cfg.CertDir
|
||||
serverCtx.DockerDaemonCertPath = cfg.CertDir
|
||||
serverCtx.DockerDaemonInsecureSkipTLSVerify = (cfg.TLSVerify.skip == types.OptionalBoolTrue)
|
||||
serverCtx.DockerInsecureSkipTLSVerify = cfg.TLSVerify.skip
|
||||
if cfg.Credentials != (types.DockerAuthConfig{}) {
|
||||
serverCtx.DockerAuthConfig = &cfg.Credentials
|
||||
}
|
||||
var repoDescList []repoDescriptor
|
||||
for imageName, refs := range cfg.Images {
|
||||
repoLogger := logrus.WithFields(logrus.Fields{
|
||||
"repo": imageName,
|
||||
"registry": registryName,
|
||||
})
|
||||
repoRef, err := parseRepositoryReference(fmt.Sprintf("%s/%s", registryName, imageName))
|
||||
if err != nil {
|
||||
repoLogger.Error("Error parsing repository name, skipping")
|
||||
logrus.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
repoLogger.Info("Processing repo")
|
||||
|
||||
var sourceReferences []types.ImageReference
|
||||
if len(refs) != 0 {
|
||||
for _, ref := range refs {
|
||||
tagLogger := logrus.WithFields(logrus.Fields{"ref": ref})
|
||||
var named reference.Named
|
||||
// first try as digest
|
||||
if d, err := digest.Parse(ref); err == nil {
|
||||
named, err = reference.WithDigest(repoRef, d)
|
||||
if err != nil {
|
||||
tagLogger.Error("Error processing ref, skipping")
|
||||
logrus.Error(err)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
tagLogger.Debugf("Ref was not a digest, trying as a tag: %s", err)
|
||||
named, err = reference.WithTag(repoRef, ref)
|
||||
if err != nil {
|
||||
tagLogger.Error("Error parsing ref, skipping")
|
||||
logrus.Error(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
imageRef, err := docker.NewReference(named)
|
||||
if err != nil {
|
||||
tagLogger.Error("Error processing ref, skipping")
|
||||
logrus.Errorf("Error getting image reference: %s", err)
|
||||
continue
|
||||
}
|
||||
sourceReferences = append(sourceReferences, imageRef)
|
||||
}
|
||||
} else { // len(refs) == 0
|
||||
repoLogger.Info("Querying registry for image tags")
|
||||
sourceReferences, err = imagesToCopyFromRepo(serverCtx, repoRef)
|
||||
if err != nil {
|
||||
repoLogger.Error("Error processing repo, skipping")
|
||||
logrus.Error(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if len(sourceReferences) == 0 {
|
||||
repoLogger.Warnf("No refs to sync found")
|
||||
continue
|
||||
}
|
||||
repoDescList = append(repoDescList, repoDescriptor{
|
||||
ImageRefs: sourceReferences,
|
||||
Context: serverCtx})
|
||||
}
|
||||
|
||||
for imageName, tagRegex := range cfg.ImagesByTagRegex {
|
||||
repoLogger := logrus.WithFields(logrus.Fields{
|
||||
"repo": imageName,
|
||||
"registry": registryName,
|
||||
})
|
||||
repoRef, err := parseRepositoryReference(fmt.Sprintf("%s/%s", registryName, imageName))
|
||||
if err != nil {
|
||||
repoLogger.Error("Error parsing repository name, skipping")
|
||||
logrus.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
repoLogger.Info("Processing repo")
|
||||
|
||||
var sourceReferences []types.ImageReference
|
||||
|
||||
tagReg, err := regexp.Compile(tagRegex)
|
||||
if err != nil {
|
||||
repoLogger.WithFields(logrus.Fields{
|
||||
"regex": tagRegex,
|
||||
}).Error("Error parsing regex, skipping")
|
||||
logrus.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
repoLogger.Info("Querying registry for image tags")
|
||||
allSourceReferences, err := imagesToCopyFromRepo(serverCtx, repoRef)
|
||||
if err != nil {
|
||||
repoLogger.Error("Error processing repo, skipping")
|
||||
logrus.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
repoLogger.Infof("Start filtering using the regular expression: %v", tagRegex)
|
||||
for _, sReference := range allSourceReferences {
|
||||
tagged, isTagged := sReference.DockerReference().(reference.Tagged)
|
||||
if !isTagged {
|
||||
repoLogger.Errorf("Internal error, reference %s does not have a tag, skipping", sReference.DockerReference())
|
||||
continue
|
||||
}
|
||||
if tagReg.MatchString(tagged.Tag()) {
|
||||
sourceReferences = append(sourceReferences, sReference)
|
||||
}
|
||||
}
|
||||
|
||||
if len(sourceReferences) == 0 {
|
||||
repoLogger.Warnf("No refs to sync found")
|
||||
continue
|
||||
}
|
||||
repoDescList = append(repoDescList, repoDescriptor{
|
||||
ImageRefs: sourceReferences,
|
||||
Context: serverCtx})
|
||||
}
|
||||
|
||||
return repoDescList, nil
|
||||
}
|
||||
|
||||
// imagesToCopy retrieves all the images to copy from a specified sync source
|
||||
// and transport.
|
||||
// It returns a slice of repository descriptors, where each descriptor is a
|
||||
// list of tagged image references to be used as sync source, and any error
|
||||
// encountered.
|
||||
func imagesToCopy(source string, transport string, sourceCtx *types.SystemContext) ([]repoDescriptor, error) {
|
||||
var descriptors []repoDescriptor
|
||||
|
||||
switch transport {
|
||||
case docker.Transport.Name():
|
||||
desc := repoDescriptor{
|
||||
Context: sourceCtx,
|
||||
}
|
||||
named, err := reference.ParseNormalizedNamed(source) // May be a repository or an image.
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Cannot obtain a valid image reference for transport %q and reference %q", docker.Transport.Name(), source)
|
||||
}
|
||||
imageTagged := !reference.IsNameOnly(named)
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"imagename": source,
|
||||
"tagged": imageTagged,
|
||||
}).Info("Tag presence check")
|
||||
if imageTagged {
|
||||
srcRef, err := docker.NewReference(named)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Cannot obtain a valid image reference for transport %q and reference %q", docker.Transport.Name(), named.String())
|
||||
}
|
||||
desc.ImageRefs = []types.ImageReference{srcRef}
|
||||
} else {
|
||||
desc.ImageRefs, err = imagesToCopyFromRepo(sourceCtx, named)
|
||||
if err != nil {
|
||||
return descriptors, err
|
||||
}
|
||||
if len(desc.ImageRefs) == 0 {
|
||||
return descriptors, errors.Errorf("No images to sync found in %q", source)
|
||||
}
|
||||
}
|
||||
descriptors = append(descriptors, desc)
|
||||
|
||||
case directory.Transport.Name():
|
||||
desc := repoDescriptor{
|
||||
Context: sourceCtx,
|
||||
}
|
||||
|
||||
if _, err := os.Stat(source); err != nil {
|
||||
return descriptors, errors.Wrap(err, "Invalid source directory specified")
|
||||
}
|
||||
desc.DirBasePath = source
|
||||
var err error
|
||||
desc.ImageRefs, err = imagesToCopyFromDir(source)
|
||||
if err != nil {
|
||||
return descriptors, err
|
||||
}
|
||||
if len(desc.ImageRefs) == 0 {
|
||||
return descriptors, errors.Errorf("No images to sync found in %q", source)
|
||||
}
|
||||
descriptors = append(descriptors, desc)
|
||||
|
||||
case "yaml":
|
||||
cfg, err := newSourceConfig(source)
|
||||
if err != nil {
|
||||
return descriptors, err
|
||||
}
|
||||
for registryName, registryConfig := range cfg {
|
||||
if len(registryConfig.Images) == 0 && len(registryConfig.ImagesByTagRegex) == 0 {
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"registry": registryName,
|
||||
}).Warn("No images specified for registry")
|
||||
continue
|
||||
}
|
||||
|
||||
descs, err := imagesToCopyFromRegistry(registryName, registryConfig, *sourceCtx)
|
||||
if err != nil {
|
||||
return descriptors, errors.Wrapf(err, "Failed to retrieve list of images from registry %q", registryName)
|
||||
}
|
||||
descriptors = append(descriptors, descs...)
|
||||
}
|
||||
}
|
||||
|
||||
return descriptors, nil
|
||||
}
|
||||
|
||||
func (opts *syncOptions) run(args []string, stdout io.Writer) error {
|
||||
if len(args) != 2 {
|
||||
return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
|
||||
}
|
||||
|
||||
policyContext, err := opts.global.getPolicyContext()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error loading trust policy")
|
||||
}
|
||||
defer policyContext.Destroy()
|
||||
|
||||
// validate source and destination options
|
||||
contains := func(val string, list []string) (_ bool) {
|
||||
for _, l := range list {
|
||||
if l == val {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if len(opts.source) == 0 {
|
||||
return errors.New("A source transport must be specified")
|
||||
}
|
||||
if !contains(opts.source, []string{docker.Transport.Name(), directory.Transport.Name(), "yaml"}) {
|
||||
return errors.Errorf("%q is not a valid source transport", opts.source)
|
||||
}
|
||||
|
||||
if len(opts.destination) == 0 {
|
||||
return errors.New("A destination transport must be specified")
|
||||
}
|
||||
if !contains(opts.destination, []string{docker.Transport.Name(), directory.Transport.Name()}) {
|
||||
return errors.Errorf("%q is not a valid destination transport", opts.destination)
|
||||
}
|
||||
|
||||
if opts.source == opts.destination && opts.source == directory.Transport.Name() {
|
||||
return errors.New("sync from 'dir' to 'dir' not implemented, consider using rsync instead")
|
||||
}
|
||||
|
||||
imageListSelection := copy.CopySystemImage
|
||||
if opts.all {
|
||||
imageListSelection = copy.CopyAllImages
|
||||
}
|
||||
|
||||
sourceCtx, err := opts.srcImage.newSystemContext()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := opts.global.commandTimeoutContext()
|
||||
defer cancel()
|
||||
|
||||
sourceArg := args[0]
|
||||
var srcRepoList []repoDescriptor
|
||||
if err = retry.RetryIfNecessary(ctx, func() error {
|
||||
srcRepoList, err = imagesToCopy(sourceArg, opts.source, sourceCtx)
|
||||
return err
|
||||
}, opts.retryOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
destination := args[1]
|
||||
destinationCtx, err := opts.destImage.newSystemContext()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
imagesNumber := 0
|
||||
options := copy.Options{
|
||||
RemoveSignatures: opts.removeSignatures,
|
||||
SignBy: opts.signByFingerprint,
|
||||
ReportWriter: os.Stdout,
|
||||
DestinationCtx: destinationCtx,
|
||||
ImageListSelection: imageListSelection,
|
||||
OptimizeDestinationImageAlreadyExists: true,
|
||||
}
|
||||
|
||||
for _, srcRepo := range srcRepoList {
|
||||
options.SourceCtx = srcRepo.Context
|
||||
for counter, ref := range srcRepo.ImageRefs {
|
||||
var destSuffix string
|
||||
switch ref.Transport() {
|
||||
case docker.Transport:
|
||||
// docker -> dir or docker -> docker
|
||||
destSuffix = ref.DockerReference().String()
|
||||
case directory.Transport:
|
||||
// dir -> docker (we don't allow `dir` -> `dir` sync operations)
|
||||
destSuffix = strings.TrimPrefix(ref.StringWithinTransport(), srcRepo.DirBasePath)
|
||||
if destSuffix == "" {
|
||||
// if source is a full path to an image, have destPath scoped to repo:tag
|
||||
destSuffix = path.Base(srcRepo.DirBasePath)
|
||||
}
|
||||
}
|
||||
|
||||
if !opts.scoped {
|
||||
destSuffix = path.Base(destSuffix)
|
||||
}
|
||||
|
||||
destRef, err := destinationReference(path.Join(destination, destSuffix), opts.destination)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"from": transports.ImageName(ref),
|
||||
"to": transports.ImageName(destRef),
|
||||
}).Infof("Copying image ref %d/%d", counter+1, len(srcRepo.ImageRefs))
|
||||
|
||||
if err = retry.RetryIfNecessary(ctx, func() error {
|
||||
_, err = copy.Image(ctx, policyContext, destRef, ref, &options)
|
||||
return err
|
||||
}, opts.retryOpts); err != nil {
|
||||
return errors.Wrapf(err, "Error copying ref %q", transports.ImageName(ref))
|
||||
}
|
||||
imagesNumber++
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Infof("Synced %d images from %d sources", imagesNumber, len(srcRepoList))
|
||||
return nil
|
||||
}
11  cmd/skopeo/unshare.go  Normal file
@@ -0,0 +1,11 @@
// +build !linux

package main

func maybeReexec() error {
	return nil
}

func reexecIfNecessaryForImages(inputImageNames ...string) error {
	return nil
}
48  cmd/skopeo/unshare_linux.go  Normal file
@@ -0,0 +1,48 @@
package main

import (
	"github.com/containers/image/v5/transports/alltransports"
	"github.com/containers/storage/pkg/unshare"
	"github.com/pkg/errors"
	"github.com/syndtr/gocapability/capability"
)

var neededCapabilities = []capability.Cap{
	capability.CAP_CHOWN,
	capability.CAP_DAC_OVERRIDE,
	capability.CAP_FOWNER,
	capability.CAP_FSETID,
	capability.CAP_MKNOD,
	capability.CAP_SETFCAP,
}

func maybeReexec() error {
	// With Skopeo we need only the subset of the root capabilities necessary
	// for pulling an image to the storage. Do not attempt to create a namespace
	// if we already have the capabilities we need.
	capabilities, err := capability.NewPid(0)
	if err != nil {
		return errors.Wrapf(err, "error reading the current capabilities sets")
	}
	for _, cap := range neededCapabilities {
		if !capabilities.Get(capability.EFFECTIVE, cap) {
			// We are missing a capability we need; create a user namespace.
			unshare.MaybeReexecUsingUserNamespace(true)
			return nil
		}
	}
	return nil
}

func reexecIfNecessaryForImages(imageNames ...string) error {
	// Check if container-storage is used before doing unshare
	for _, imageName := range imageNames {
		transport := alltransports.TransportFromImageName(imageName)
		// Hard-code the storage name to avoid a reference on c/image/storage.
		// See https://github.com/containers/skopeo/issues/771#issuecomment-563125006.
		if transport != nil && transport.Name() == "containers-storage" {
			return maybeReexec()
		}
	}
	return nil
}
276  cmd/skopeo/utils.go  Normal file
@@ -0,0 +1,276 @@
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/common/pkg/retry"
|
||||
"github.com/containers/image/v5/pkg/compression"
|
||||
"github.com/containers/image/v5/transports/alltransports"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// errorShouldDisplayUsage is a subtype of error used by command handlers to indicate that the command's help output should be shown.
type errorShouldDisplayUsage struct {
|
||||
error
|
||||
}
|
||||
|
||||
// commandAction intermediates between the RunE interface and the real handler,
|
||||
// primarily to ensure that cobra.Command is not available to the handler, which in turn
|
||||
// makes sure that the cmd.Flags() etc. flag access functions are not used,
|
||||
// and everything is done using the *Options structures and the *Var() methods of cmd.Flag().
|
||||
// handler may return errorShouldDisplayUsage to cause c.Help to be called.
|
||||
func commandAction(handler func(args []string, stdout io.Writer) error) func(cmd *cobra.Command, args []string) error {
|
||||
return func(c *cobra.Command, args []string) error {
|
||||
err := handler(args, c.OutOrStdout())
|
||||
if _, ok := err.(errorShouldDisplayUsage); ok {
|
||||
c.Help()
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
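// Illustrative usage: a handler can trigger the help output by returning the
// wrapper error type, e.g.
//   return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
// as the sync handler in this change does.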
|
||||
|
||||
// sharedImageOptions collects CLI flags which are image-related, but do not change across images.
|
||||
// This really should be a part of globalOptions, but that would break existing users of (skopeo copy --authfile=).
|
||||
type sharedImageOptions struct {
|
||||
authFilePath string // Path to a */containers/auth.json
|
||||
}
|
||||
|
||||
// sharedImageFlags prepares a collection of CLI flags writing into sharedImageOptions, and the managed sharedImageOptions structure.
|
||||
func sharedImageFlags() (pflag.FlagSet, *sharedImageOptions) {
|
||||
opts := sharedImageOptions{}
|
||||
fs := pflag.FlagSet{}
|
||||
fs.StringVar(&opts.authFilePath, "authfile", os.Getenv("REGISTRY_AUTH_FILE"), "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json")
|
||||
return fs, &opts
|
||||
}
|
||||
|
||||
// dockerImageOptions collects CLI flags specific to the "docker" transport, which are
|
||||
// the same across subcommands, but may be different for each image
|
||||
// (e.g. may differ between the source and destination of a copy)
|
||||
type dockerImageOptions struct {
|
||||
global *globalOptions // May be shared across several imageOptions instances.
|
||||
shared *sharedImageOptions // May be shared across several imageOptions instances.
|
||||
authFilePath optionalString // Path to a */containers/auth.json (prefixed version to override shared image option).
|
||||
credsOption optionalString // username[:password] for accessing a registry
|
||||
registryToken optionalString // token to be used directly as a Bearer token when accessing the registry
|
||||
dockerCertPath string // A directory using Docker-like *.{crt,cert,key} files for connecting to a registry or a daemon
|
||||
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
|
||||
noCreds bool // Access the registry anonymously
|
||||
}
|
||||
|
||||
// imageOptions collects CLI flags which are the same across subcommands, but may be different for each image
|
||||
// (e.g. may differ between the source and destination of a copy)
|
||||
type imageOptions struct {
|
||||
dockerImageOptions
|
||||
sharedBlobDir string // A directory to use for OCI blobs, shared across repositories
|
||||
dockerDaemonHost string // docker-daemon: host to connect to
|
||||
}
|
||||
|
||||
// dockerImageFlags prepares a collection of docker-transport specific CLI flags
|
||||
// writing into imageOptions, and the managed imageOptions structure.
|
||||
func dockerImageFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageOptions) {
|
||||
flags := imageOptions{
|
||||
dockerImageOptions: dockerImageOptions{
|
||||
global: global,
|
||||
shared: shared,
|
||||
},
|
||||
}
|
||||
|
||||
fs := pflag.FlagSet{}
|
||||
if flagPrefix != "" {
|
||||
// the non-prefixed flag is handled by a shared flag.
|
||||
fs.Var(newOptionalStringValue(&flags.authFilePath), flagPrefix+"authfile", "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json")
|
||||
}
|
||||
fs.Var(newOptionalStringValue(&flags.credsOption), flagPrefix+"creds", "Use `USERNAME[:PASSWORD]` for accessing the registry")
|
||||
if credsOptionAlias != "" {
|
||||
// This is horribly ugly, but we need to support the old option forms of (skopeo copy) for compatibility.
|
||||
// Don't add any more cases like this.
|
||||
f := fs.VarPF(newOptionalStringValue(&flags.credsOption), credsOptionAlias, "", "Use `USERNAME[:PASSWORD]` for accessing the registry")
|
||||
f.Hidden = true
|
||||
}
|
||||
fs.Var(newOptionalStringValue(&flags.registryToken), flagPrefix+"registry-token", "Provide a Bearer token for accessing the registry")
|
||||
fs.StringVar(&flags.dockerCertPath, flagPrefix+"cert-dir", "", "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry or daemon")
|
||||
optionalBoolFlag(&fs, &flags.tlsVerify, flagPrefix+"tls-verify", "require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)")
|
||||
fs.BoolVar(&flags.noCreds, flagPrefix+"no-creds", false, "Access the registry anonymously")
|
||||
return fs, &flags
|
||||
}
|
||||
|
||||
// imageFlags prepares a collection of CLI flags writing into imageOptions, and the managed imageOptions structure.
|
||||
func imageFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageOptions) {
|
||||
dockerFlags, opts := dockerImageFlags(global, shared, flagPrefix, credsOptionAlias)
|
||||
|
||||
fs := pflag.FlagSet{}
|
||||
fs.StringVar(&opts.sharedBlobDir, flagPrefix+"shared-blob-dir", "", "`DIRECTORY` to use to share blobs across OCI repositories")
|
||||
fs.StringVar(&opts.dockerDaemonHost, flagPrefix+"daemon-host", "", "use docker daemon host at `HOST` (docker-daemon: only)")
|
||||
fs.AddFlagSet(&dockerFlags)
|
||||
return fs, opts
|
||||
}
|
||||
|
||||
type retryOptions struct {
|
||||
maxRetry int // The number of times to possibly retry
|
||||
}
|
||||
|
||||
func retryFlags() (pflag.FlagSet, *retry.RetryOptions) {
|
||||
opts := retry.RetryOptions{}
|
||||
fs := pflag.FlagSet{}
|
||||
fs.IntVar(&opts.MaxRetry, "retry-times", 0, "the number of times to possibly retry")
|
||||
return fs, &opts
|
||||
}
|
||||
|
||||
// newSystemContext returns a *types.SystemContext corresponding to opts.
|
||||
// It is guaranteed to return a fresh instance, so it is safe to make additional updates to it.
|
||||
func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
|
||||
// Start from the *types.SystemContext instance built from globalOptions;
// the imageOptions values below override it where both are present.
ctx := opts.global.newSystemContext()
|
||||
ctx.DockerCertPath = opts.dockerCertPath
|
||||
ctx.OCISharedBlobDirPath = opts.sharedBlobDir
|
||||
ctx.AuthFilePath = opts.shared.authFilePath
|
||||
ctx.DockerDaemonHost = opts.dockerDaemonHost
|
||||
ctx.DockerDaemonCertPath = opts.dockerCertPath
|
||||
if opts.dockerImageOptions.authFilePath.present {
|
||||
ctx.AuthFilePath = opts.dockerImageOptions.authFilePath.value
|
||||
}
|
||||
if opts.tlsVerify.present {
|
||||
ctx.DockerDaemonInsecureSkipTLSVerify = !opts.tlsVerify.value
|
||||
}
|
||||
if opts.tlsVerify.present {
|
||||
ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.value)
|
||||
}
|
||||
if opts.credsOption.present && opts.noCreds {
|
||||
return nil, errors.New("creds and no-creds cannot be specified at the same time")
|
||||
}
|
||||
if opts.credsOption.present {
|
||||
var err error
|
||||
ctx.DockerAuthConfig, err = getDockerAuth(opts.credsOption.value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if opts.registryToken.present {
|
||||
ctx.DockerBearerRegistryToken = opts.registryToken.value
|
||||
}
|
||||
if opts.noCreds {
|
||||
ctx.DockerAuthConfig = &types.DockerAuthConfig{}
|
||||
}
|
||||
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
// imageDestOptions is a superset of imageOptions specialized for image destinations.
|
||||
type imageDestOptions struct {
|
||||
*imageOptions
|
||||
dirForceCompression bool // Compress layers when saving to the dir: transport
|
||||
ociAcceptUncompressedLayers bool // Whether to accept uncompressed layers in the oci: transport
|
||||
compressionFormat string // Format to use for the compression
|
||||
compressionLevel optionalInt // Level to use for the compression
|
||||
}
|
||||
|
||||
// imageDestFlags prepares a collection of CLI flags writing into imageDestOptions, and the managed imageDestOptions structure.
|
||||
func imageDestFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageDestOptions) {
|
||||
genericFlags, genericOptions := imageFlags(global, shared, flagPrefix, credsOptionAlias)
|
||||
opts := imageDestOptions{imageOptions: genericOptions}
|
||||
fs := pflag.FlagSet{}
|
||||
fs.AddFlagSet(&genericFlags)
|
||||
fs.BoolVar(&opts.dirForceCompression, flagPrefix+"compress", false, "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)")
|
||||
fs.BoolVar(&opts.ociAcceptUncompressedLayers, flagPrefix+"oci-accept-uncompressed-layers", false, "Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed)")
|
||||
fs.StringVar(&opts.compressionFormat, flagPrefix+"compress-format", "", "`FORMAT` to use for the compression")
|
||||
fs.Var(newOptionalIntValue(&opts.compressionLevel), flagPrefix+"compress-level", "`LEVEL` to use for the compression")
|
||||
return fs, &opts
|
||||
}
|
||||
|
||||
// newSystemContext returns a *types.SystemContext corresponding to opts.
|
||||
// It is guaranteed to return a fresh instance, so it is safe to make additional updates to it.
|
||||
func (opts *imageDestOptions) newSystemContext() (*types.SystemContext, error) {
|
||||
ctx, err := opts.imageOptions.newSystemContext()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctx.DirForceCompress = opts.dirForceCompression
|
||||
ctx.OCIAcceptUncompressedLayers = opts.ociAcceptUncompressedLayers
|
||||
if opts.compressionFormat != "" {
|
||||
cf, err := compression.AlgorithmByName(opts.compressionFormat)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ctx.CompressionFormat = &cf
|
||||
}
|
||||
if opts.compressionLevel.present {
|
||||
ctx.CompressionLevel = &opts.compressionLevel.value
|
||||
}
|
||||
return ctx, err
|
||||
}
|
||||
|
||||
func parseCreds(creds string) (string, string, error) {
|
||||
if creds == "" {
|
||||
return "", "", errors.New("credentials can't be empty")
|
||||
}
|
||||
up := strings.SplitN(creds, ":", 2)
|
||||
if len(up) == 1 {
|
||||
return up[0], "", nil
|
||||
}
|
||||
if up[0] == "" {
|
||||
return "", "", errors.New("username can't be empty")
|
||||
}
|
||||
return up[0], up[1], nil
|
||||
}
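// Illustrative examples (placeholder values):
//   parseCreds("john:secret") -> ("john", "secret", nil)
//   parseCreds("john")        -> ("john", "", nil)
//   parseCreds(":secret")     -> error, the username can't be empty
//   parseCreds("")            -> error, credentials can't be empty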
|
||||
|
||||
func getDockerAuth(creds string) (*types.DockerAuthConfig, error) {
|
||||
username, password, err := parseCreds(creds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &types.DockerAuthConfig{
|
||||
Username: username,
|
||||
Password: password,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parseImageSource converts image URL-like string to an ImageSource.
|
||||
// The caller must call .Close() on the returned ImageSource.
|
||||
func parseImageSource(ctx context.Context, opts *imageOptions, name string) (types.ImageSource, error) {
|
||||
ref, err := alltransports.ParseImageName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sys, err := opts.newSystemContext()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ref.NewImageSource(ctx, sys)
|
||||
}
|
||||
|
||||
// usageTemplate returns the usage template for skopeo commands
|
||||
// This blocks the displaying of the global options. The main skopeo
|
||||
// command should not use this.
|
||||
const usageTemplate = `Usage:{{if .Runnable}}
|
||||
{{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
|
||||
|
||||
{{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
|
||||
|
||||
Aliases:
|
||||
{{.NameAndAliases}}{{end}}{{if .HasExample}}
|
||||
|
||||
Examples:
|
||||
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
|
||||
|
||||
Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
|
||||
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
|
||||
|
||||
Flags:
|
||||
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
|
||||
{{end}}
|
||||
`
|
||||
|
||||
// adjustUsage applies usageTemplate to hide the global options from a command's usage
// and disables [flags] at the end of the command usage line.
func adjustUsage(c *cobra.Command) {
|
||||
c.SetUsageTemplate(usageTemplate)
|
||||
c.DisableFlagsInUseLine = true
|
||||
}
249  cmd/skopeo/utils_test.go  Normal file
@@ -0,0 +1,249 @@
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// fakeGlobalOptions creates globalOptions and sets it according to flags.
|
||||
func fakeGlobalOptions(t *testing.T, flags []string) (*globalOptions, *cobra.Command) {
|
||||
app, opts := createApp()
|
||||
cmd := &cobra.Command{}
|
||||
app.AddCommand(cmd)
|
||||
err := cmd.ParseFlags(flags)
|
||||
require.NoError(t, err)
|
||||
return opts, cmd
|
||||
}
|
||||
|
||||
// fakeImageOptions creates imageOptions and sets it according to globalFlags/cmdFlags.
|
||||
func fakeImageOptions(t *testing.T, flagPrefix string, globalFlags []string, cmdFlags []string) *imageOptions {
|
||||
globalOpts, cmd := fakeGlobalOptions(t, globalFlags)
|
||||
sharedFlags, sharedOpts := sharedImageFlags()
|
||||
imageFlags, imageOpts := imageFlags(globalOpts, sharedOpts, flagPrefix, "")
|
||||
cmd.Flags().AddFlagSet(&sharedFlags)
|
||||
cmd.Flags().AddFlagSet(&imageFlags)
|
||||
err := cmd.ParseFlags(cmdFlags)
|
||||
require.NoError(t, err)
|
||||
return imageOpts
|
||||
}
|
||||
|
||||
func TestImageOptionsNewSystemContext(t *testing.T) {
|
||||
// Default state
|
||||
opts := fakeImageOptions(t, "dest-", []string{}, []string{})
|
||||
res, err := opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
}, res)
|
||||
|
||||
// Set everything to non-default values.
|
||||
opts = fakeImageOptions(t, "dest-", []string{
|
||||
"--registries.d", "/srv/registries.d",
|
||||
"--override-arch", "overridden-arch",
|
||||
"--override-os", "overridden-os",
|
||||
"--override-variant", "overridden-variant",
|
||||
"--tmpdir", "/srv",
|
||||
}, []string{
|
||||
"--authfile", "/srv/authfile",
|
||||
"--dest-authfile", "/srv/dest-authfile",
|
||||
"--dest-cert-dir", "/srv/cert-dir",
|
||||
"--dest-shared-blob-dir", "/srv/shared-blob-dir",
|
||||
"--dest-daemon-host", "daemon-host.example.com",
|
||||
"--dest-tls-verify=false",
|
||||
"--dest-creds", "creds-user:creds-password",
|
||||
"--dest-registry-token", "faketoken",
|
||||
})
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
RegistriesDirPath: "/srv/registries.d",
|
||||
AuthFilePath: "/srv/dest-authfile",
|
||||
ArchitectureChoice: "overridden-arch",
|
||||
OSChoice: "overridden-os",
|
||||
VariantChoice: "overridden-variant",
|
||||
OCISharedBlobDirPath: "/srv/shared-blob-dir",
|
||||
DockerCertPath: "/srv/cert-dir",
|
||||
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
|
||||
DockerAuthConfig: &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
|
||||
DockerBearerRegistryToken: "faketoken",
|
||||
DockerDaemonCertPath: "/srv/cert-dir",
|
||||
DockerDaemonHost: "daemon-host.example.com",
|
||||
DockerDaemonInsecureSkipTLSVerify: true,
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
BigFilesTemporaryDir: "/srv",
|
||||
}, res)
|
||||
|
||||
// Global/per-command tlsVerify behavior
|
||||
for _, c := range []struct {
|
||||
global, cmd string
|
||||
expectedDocker types.OptionalBool
|
||||
expectedDockerDaemon bool
|
||||
}{
|
||||
{"", "", types.OptionalBoolUndefined, false},
|
||||
{"", "false", types.OptionalBoolTrue, true},
|
||||
{"", "true", types.OptionalBoolFalse, false},
|
||||
{"false", "", types.OptionalBoolTrue, false},
|
||||
{"false", "false", types.OptionalBoolTrue, true},
|
||||
{"false", "true", types.OptionalBoolFalse, false},
|
||||
{"true", "", types.OptionalBoolFalse, false},
|
||||
{"true", "false", types.OptionalBoolTrue, true},
|
||||
{"true", "true", types.OptionalBoolFalse, false},
|
||||
} {
|
||||
globalFlags := []string{}
|
||||
if c.global != "" {
|
||||
globalFlags = append(globalFlags, "--tls-verify="+c.global)
|
||||
}
|
||||
cmdFlags := []string{}
|
||||
if c.cmd != "" {
|
||||
cmdFlags = append(cmdFlags, "--dest-tls-verify="+c.cmd)
|
||||
}
|
||||
opts := fakeImageOptions(t, "dest-", globalFlags, cmdFlags)
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, c.expectedDocker, res.DockerInsecureSkipTLSVerify, "%#v", c)
|
||||
assert.Equal(t, c.expectedDockerDaemon, res.DockerDaemonInsecureSkipTLSVerify, "%#v", c)
|
||||
}
|
||||
|
||||
// Invalid option values
|
||||
opts = fakeImageOptions(t, "dest-", []string{}, []string{"--dest-creds", ""})
|
||||
_, err = opts.newSystemContext()
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// fakeImageDestOptions creates imageDestOptions and sets it according to globalFlags/cmdFlags.
|
||||
func fakeImageDestOptions(t *testing.T, flagPrefix string, globalFlags []string, cmdFlags []string) *imageDestOptions {
|
||||
globalOpts, cmd := fakeGlobalOptions(t, globalFlags)
|
||||
sharedFlags, sharedOpts := sharedImageFlags()
|
||||
imageFlags, imageOpts := imageDestFlags(globalOpts, sharedOpts, flagPrefix, "")
|
||||
cmd.Flags().AddFlagSet(&sharedFlags)
|
||||
cmd.Flags().AddFlagSet(&imageFlags)
|
||||
err := cmd.ParseFlags(cmdFlags)
|
||||
require.NoError(t, err)
|
||||
return imageOpts
|
||||
}
|
||||
|
||||
func TestImageDestOptionsNewSystemContext(t *testing.T) {
|
||||
// Default state
|
||||
opts := fakeImageDestOptions(t, "dest-", []string{}, []string{})
|
||||
res, err := opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
}, res)
|
||||
|
||||
oldXRD, hasXRD := os.LookupEnv("REGISTRY_AUTH_FILE")
|
||||
defer func() {
|
||||
if hasXRD {
|
||||
os.Setenv("REGISTRY_AUTH_FILE", oldXRD)
|
||||
} else {
|
||||
os.Unsetenv("REGISTRY_AUTH_FILE")
|
||||
}
|
||||
}()
|
||||
authFile := "/tmp/auth.json"
|
||||
// Make sure when REGISTRY_AUTH_FILE is set the auth file is used
|
||||
os.Setenv("REGISTRY_AUTH_FILE", authFile)
|
||||
|
||||
// Explicitly set everything to default, except for when the default is “not present”
|
||||
opts = fakeImageDestOptions(t, "dest-", []string{}, []string{
|
||||
"--dest-compress=false",
|
||||
})
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
AuthFilePath: authFile,
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
}, res)
|
||||
|
||||
// Set everything to non-default values.
|
||||
opts = fakeImageDestOptions(t, "dest-", []string{
|
||||
"--registries.d", "/srv/registries.d",
|
||||
"--override-arch", "overridden-arch",
|
||||
"--override-os", "overridden-os",
|
||||
"--override-variant", "overridden-variant",
|
||||
"--tmpdir", "/srv",
|
||||
}, []string{
|
||||
"--authfile", "/srv/authfile",
|
||||
"--dest-cert-dir", "/srv/cert-dir",
|
||||
"--dest-shared-blob-dir", "/srv/shared-blob-dir",
|
||||
"--dest-compress=true",
|
||||
"--dest-daemon-host", "daemon-host.example.com",
|
||||
"--dest-tls-verify=false",
|
||||
"--dest-creds", "creds-user:creds-password",
|
||||
"--dest-registry-token", "faketoken",
|
||||
})
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
RegistriesDirPath: "/srv/registries.d",
|
||||
AuthFilePath: "/srv/authfile",
|
||||
ArchitectureChoice: "overridden-arch",
|
||||
OSChoice: "overridden-os",
|
||||
VariantChoice: "overridden-variant",
|
||||
OCISharedBlobDirPath: "/srv/shared-blob-dir",
|
||||
DockerCertPath: "/srv/cert-dir",
|
||||
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
|
||||
DockerAuthConfig: &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
|
||||
DockerBearerRegistryToken: "faketoken",
|
||||
DockerDaemonCertPath: "/srv/cert-dir",
|
||||
DockerDaemonHost: "daemon-host.example.com",
|
||||
DockerDaemonInsecureSkipTLSVerify: true,
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
DirForceCompress: true,
|
||||
BigFilesTemporaryDir: "/srv",
|
||||
}, res)
|
||||
|
||||
// Invalid option values in imageOptions
|
||||
opts = fakeImageDestOptions(t, "dest-", []string{}, []string{"--dest-creds", ""})
|
||||
_, err = opts.newSystemContext()
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// since there is a shared authfile image option and a non-shared (prefixed) one, make sure the override logic
|
||||
// works correctly.
|
||||
func TestImageOptionsAuthfileOverride(t *testing.T) {
|
||||
|
||||
for _, testCase := range []struct {
|
||||
flagPrefix string
|
||||
cmdFlags []string
|
||||
expectedAuthfilePath string
|
||||
}{
|
||||
// if there is no prefix, only authfile is allowed.
|
||||
{"",
|
||||
[]string{
|
||||
"--authfile", "/srv/authfile",
|
||||
}, "/srv/authfile"},
|
||||
// if authfile and dest-authfile is provided, dest-authfile wins
|
||||
{"dest-",
|
||||
[]string{
|
||||
"--authfile", "/srv/authfile",
|
||||
"--dest-authfile", "/srv/dest-authfile",
|
||||
}, "/srv/dest-authfile",
|
||||
},
|
||||
// if only the shared authfile is provided, authfile must be present in system context
|
||||
{"dest-",
|
||||
[]string{
|
||||
"--authfile", "/srv/authfile",
|
||||
}, "/srv/authfile",
|
||||
},
|
||||
// if only the dest authfile is provided, dest-authfile must be present in system context
|
||||
{"dest-",
|
||||
[]string{
|
||||
"--dest-authfile", "/srv/dest-authfile",
|
||||
}, "/srv/dest-authfile",
|
||||
},
|
||||
} {
|
||||
opts := fakeImageOptions(t, testCase.flagPrefix, []string{}, testCase.cmdFlags)
|
||||
res, err := opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
AuthFilePath: testCase.expectedAuthfilePath,
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
}, res)
|
||||
}
|
||||
}
279  completions/bash/skopeo  Normal file
@@ -0,0 +1,279 @@
#! /bin/bash
|
||||
|
||||
_complete_() {
|
||||
local options_with_args=$1
|
||||
local boolean_options="$2 -h --help"
|
||||
local transports=$3
|
||||
|
||||
local option_with_args
|
||||
for option_with_args in $options_with_args $transports
|
||||
do
|
||||
if [ "$option_with_args" == "$prev" ] || [ "$option_with_args" == "$cur" ]
|
||||
then
|
||||
return
|
||||
fi
|
||||
done
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
while IFS='' read -r line; do COMPREPLY+=("$line"); done < <(compgen -W "$boolean_options $options_with_args" -- "$cur")
|
||||
;;
|
||||
*)
|
||||
if [ -n "$transports" ]
|
||||
then
|
||||
compopt -o nospace
|
||||
while IFS='' read -r line; do COMPREPLY+=("$line"); done < <(compgen -W "$transports" -- "$cur")
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
_skopeo_supported_transports() {
|
||||
local subcommand=$1
|
||||
|
||||
skopeo "$subcommand" --help | grep "Supported transports" -A 1 | tail -n 1 | sed -e 's/,/:/g' -e 's/$/:/'
|
||||
}
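# Illustrative output (the exact list depends on the skopeo build): for
# "skopeo copy" the pipeline above turns the "Supported transports" help line
# into something like
#   containers-storage: dir: docker: docker-archive: docker-daemon: oci: oci-archive: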
|
||||
|
||||
_skopeo_copy() {
|
||||
local options_with_args="
|
||||
--authfile
|
||||
--src-authfile
|
||||
--dest-authfile
|
||||
--format -f
|
||||
--sign-by
|
||||
--src-creds --screds
|
||||
--src-cert-dir
|
||||
--src-tls-verify
|
||||
--dest-creds --dcreds
|
||||
--dest-cert-dir
|
||||
--dest-tls-verify
|
||||
--src-daemon-host
|
||||
--dest-daemon-host
|
||||
--src-registry-token
|
||||
--dest-registry-token
|
||||
"
|
||||
|
||||
local boolean_options="
|
||||
--all
|
||||
--dest-compress
|
||||
--remove-signatures
|
||||
--src-no-creds
|
||||
--dest-no-creds
|
||||
--dest-oci-accept-uncompressed-layers
|
||||
"
|
||||
|
||||
local transports
|
||||
transports="
|
||||
$(_skopeo_supported_transports "${FUNCNAME//"_skopeo_"/}")
|
||||
"
|
||||
|
||||
_complete_ "$options_with_args" "$boolean_options" "$transports"
|
||||
}
|
||||
|
||||
_skopeo_inspect() {
|
||||
local options_with_args="
|
||||
--authfile
|
||||
--creds
|
||||
--cert-dir
|
||||
--format
|
||||
--retry-times
|
||||
--registry-token
|
||||
"
|
||||
local boolean_options="
|
||||
--config
|
||||
--raw
|
||||
--tls-verify
|
||||
--no-creds
|
||||
"
|
||||
|
||||
local transports
|
||||
transports="
|
||||
$(_skopeo_supported_transports "${FUNCNAME//"_skopeo_"/}")
|
||||
"
|
||||
|
||||
_complete_ "$options_with_args" "$boolean_options" "$transports"
|
||||
}
|
||||
|
||||
_skopeo_standalone_sign() {
|
||||
local options_with_args="
|
||||
-o --output
|
||||
"
|
||||
local boolean_options="
|
||||
"
|
||||
_complete_ "$options_with_args" "$boolean_options"
|
||||
}
|
||||
|
||||
_skopeo_standalone_verify() {
|
||||
local options_with_args="
|
||||
"
|
||||
local boolean_options="
|
||||
"
|
||||
_complete_ "$options_with_args" "$boolean_options"
|
||||
}
|
||||
|
||||
_skopeo_manifest_digest() {
|
||||
local options_with_args="
|
||||
"
|
||||
local boolean_options="
|
||||
"
|
||||
_complete_ "$options_with_args" "$boolean_options"
|
||||
}
|
||||
|
||||
_skopeo_delete() {
|
||||
local options_with_args="
|
||||
--authfile
|
||||
--creds
|
||||
--cert-dir
|
||||
--registry-token
|
||||
"
|
||||
local boolean_options="
|
||||
--tls-verify
|
||||
--no-creds
|
||||
"
|
||||
|
||||
local transports
|
||||
transports="
|
||||
$(_skopeo_supported_transports "${FUNCNAME//"_skopeo_"/}")
|
||||
"
|
||||
|
||||
_complete_ "$options_with_args" "$boolean_options" "$transports"
|
||||
}
|
||||
|
||||
_skopeo_layers() {
|
||||
local options_with_args="
|
||||
--authfile
|
||||
--creds
|
||||
--cert-dir
|
||||
--registry-token
|
||||
"
|
||||
local boolean_options="
|
||||
--tls-verify
|
||||
--no-creds
|
||||
"
|
||||
_complete_ "$options_with_args" "$boolean_options"
|
||||
}
|
||||
|
||||
_skopeo_list_repository_tags() {
|
||||
local options_with_args="
|
||||
--authfile
|
||||
--creds
|
||||
--cert-dir
|
||||
--registry-token
|
||||
"
|
||||
|
||||
local boolean_options="
|
||||
--tls-verify
|
||||
--no-creds
|
||||
"
|
||||
_complete_ "$options_with_args" "$boolean_options"
|
||||
}
|
||||
|
||||
_skopeo_login() {
|
||||
local options_with_args="
|
||||
--authfile
|
||||
--cert-dir
|
||||
--password -p
|
||||
--username -u
|
||||
"
|
||||
|
||||
local boolean_options="
|
||||
--get-login
|
||||
--tls-verify
|
||||
--password-stdin
|
||||
"
|
||||
_complete_ "$options_with_args" "$boolean_options"
|
||||
}
|
||||
|
||||
_skopeo_logout() {
|
||||
local options_with_args="
|
||||
--authfile
|
||||
"
|
||||
|
||||
local boolean_options="
|
||||
--all -a
|
||||
"
|
||||
_complete_ "$options_with_args" "$boolean_options"
|
||||
}
|
||||
|
||||
_skopeo_skopeo() {
|
||||
# XXX: Changes here need to be reflected in the manually expanded
|
||||
# string in the `case` statement below as well.
|
||||
local options_with_args="
|
||||
--policy
|
||||
--registries.d
|
||||
--override-arch
|
||||
--override-os
|
||||
--override-variant
|
||||
--command-timeout
|
||||
--tmpdir
|
||||
"
|
||||
local boolean_options="
|
||||
--insecure-policy
|
||||
--debug
|
||||
--version -v
|
||||
--help -h
|
||||
"
|
||||
|
||||
local commands=(
|
||||
copy
|
||||
delete
|
||||
inspect
|
||||
list-tags
|
||||
login
|
||||
logout
|
||||
manifest-digest
|
||||
standalone-sign
|
||||
standalone-verify
|
||||
sync
|
||||
help
|
||||
h
|
||||
)
|
||||
|
||||
case "$prev" in
|
||||
# XXX: Changes here need to be reflected in $options_with_args as well.
|
||||
--policy|--registries.d|--override-arch|--override-os|--override-variant|--command-timeout)
|
||||
return
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
while IFS='' read -r line; do COMPREPLY+=("$line"); done < <(compgen -W "$boolean_options $options_with_args" -- "$cur")
|
||||
;;
|
||||
*)
|
||||
while IFS='' read -r line; do COMPREPLY+=("$line"); done < <(compgen -W "${commands[*]} help" -- "$cur")
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
_cli_bash_autocomplete() {
|
||||
local cur prev words cword
COMPREPLY=()

_get_comp_words_by_ref -n : cur prev words cword
|
||||
|
||||
local command="skopeo" cpos=0
|
||||
local counter=1
|
||||
while [ $counter -lt "$cword" ]; do
|
||||
case "${words[$counter]}" in
|
||||
skopeo|copy|inspect|delete|manifest-digest|standalone-sign|standalone-verify|help|h|list-repository-tags)
|
||||
command="${words[$counter]//-/_}"
|
||||
cpos=$counter
|
||||
(( cpos++ ))
|
||||
break
|
||||
;;
|
||||
esac
|
||||
(( counter++ ))
|
||||
done
|
||||
|
||||
local completions_func=_skopeo_${command}
|
||||
declare -F "$completions_func" >/dev/null && $completions_func
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
complete -F _cli_bash_autocomplete skopeo
|
||||
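
A minimal sketch of enabling the completion script above; the source file name `skopeo-completion.bash` and the target directory are assumptions based on the common bash-completion layout and vary by distribution:

```sh
# Install system-wide so bash-completion loads it on demand
# (the target directory is distribution-specific).
sudo install -m 0644 skopeo-completion.bash /usr/share/bash-completion/completions/skopeo

# Or source it only for the current shell:
source skopeo-completion.bash
skopeo <TAB><TAB>   # should now offer copy, delete, inspect, ...
```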
43  contrib/skopeoimage/README.md  Normal file
@@ -0,0 +1,43 @@
<img src="https://cdn.rawgit.com/containers/skopeo/master/docs/skopeo.svg" width="250">

----

# skopeoimage

## Overview

This directory contains the Dockerfiles necessary to create the three skopeoimage container
images that are housed on quay.io under the skopeo account. All three repositories where
the images live are public and can be pulled without credentials. These container images
are secured and the resulting containers can run safely. The container images are built
using the latest Fedora and then Skopeo is installed into them:

* quay.io/skopeo/stable - This image is built using the latest stable version of Skopeo in a Fedora-based container. Built with skopeoimage/stable/Dockerfile.
* quay.io/skopeo/upstream - This image is built using the latest code found in this GitHub repository. A new image is created whenever a commit is pushed, so it changes frequently and is not guaranteed to be stable. Built with skopeoimage/upstream/Dockerfile.
* quay.io/skopeo/testing - This image is built using the latest version of Skopeo that is or was in updates-testing for Fedora. At times this may be the same as the stable image. This container image will primarily be used by the development teams for verification testing when a new package is created. Built with skopeoimage/testing/Dockerfile.

## Multiarch images

Multiarch images are available for Skopeo upstream and stable versions. Supported architectures are `amd64`, `s390x`, and `ppc64le`.
Available images are `quay.io/skopeo/upstream:master`, `quay.io/skopeo/stable:v1.2.0`, and `quay.io/containers/skopeo:v1.2.0`.

Images can be used the same way as in the single-architecture case; no extra setup is required. For samples, see the next section.

## Sample Usage

Although not required, it is suggested that [Podman](https://github.com/containers/podman) be used with these container images.

```
# Get help on Skopeo
podman run docker://quay.io/skopeo/stable:latest --help

# Get help on the Skopeo copy command
podman run docker://quay.io/skopeo/stable:latest copy --help

# Copy the Skopeo container image from quay.io to
# a private registry
podman run docker://quay.io/skopeo/stable:latest copy docker://quay.io/skopeo/stable docker://registry.internal.company.com/skopeo

# Inspect the fedora:latest image
podman run docker://quay.io/skopeo/stable:latest inspect --config docker://registry.fedoraproject.org/fedora:latest | jq
```
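
Since the images set `REGISTRY_AUTH_FILE=/tmp/auth.json` (see the Dockerfiles below), credentials for a private registry can be supplied by mounting an auth file at that path. A sketch; the host path and registry name are illustrative:

```sh
# Reuse an existing auth.json from the host (e.g. created by skopeo/podman login)
# and mount it read-only where the image expects it.
podman run --rm \
  -v "$HOME/.config/containers/auth.json:/tmp/auth.json:ro" \
  docker://quay.io/skopeo/stable:latest \
  inspect docker://registry.internal.company.com/app:1.0
```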
33  contrib/skopeoimage/stable/Dockerfile  Normal file
@@ -0,0 +1,33 @@
# stable/Dockerfile
#
# Build a Skopeo container image from the latest
# stable version of Skopeo on the Fedora Updates System.
# https://bodhi.fedoraproject.org/updates/?search=skopeo
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:33

# Don't include container-selinux and remove
# directories used by yum that are just taking
# up space. Also reinstall shadow-utils as without
# doing so, the setuid/setgid bits on newuidmap
# and newgidmap are lost in the Fedora images.
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; yum -y install skopeo fuse-overlayfs --exclude container-selinux; yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*

# Adjust storage.conf to enable Fuse storage.
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf

# Set up the ability to use additional stores
# with this container image.
RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers; touch /var/lib/shared/overlay-images/images.lock; touch /var/lib/shared/overlay-layers/layers.lock

# Set up skopeo's uid/gid entries
RUN echo skopeo:100000:65536 > /etc/subuid
RUN echo skopeo:100000:65536 > /etc/subgid

# Point to the authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json

# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]
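
A minimal sketch of building and smoke-testing this image locally; the image tag and build context path are assumptions:

```sh
# Build from the repository root; the -f path matches the file above.
podman build -t localhost/skopeoimage:stable -f contrib/skopeoimage/stable/Dockerfile .

# The entrypoint is /usr/bin/skopeo, so arguments go straight to skopeo.
podman run --rm localhost/skopeoimage:stable --version
```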
34  contrib/skopeoimage/testing/Dockerfile  Normal file
@@ -0,0 +1,34 @@
# testing/Dockerfile
#
# Build a Skopeo container image from the latest
# version of Skopeo that is in updates-testing
# on the Fedora Updates System.
# https://bodhi.fedoraproject.org/updates/?search=skopeo
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:33

# Don't include container-selinux and remove
# directories used by yum that are just taking
# up space. Also reinstall shadow-utils as without
# doing so, the setuid/setgid bits on newuidmap
# and newgidmap are lost in the Fedora images.
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; yum -y install skopeo fuse-overlayfs --enablerepo updates-testing --exclude container-selinux; yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*

# Adjust storage.conf to enable Fuse storage.
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf

# Set up the ability to use additional stores
# with this container image.
RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers; touch /var/lib/shared/overlay-images/images.lock; touch /var/lib/shared/overlay-layers/layers.lock

# Set up skopeo's uid/gid entries
RUN echo skopeo:100000:65536 > /etc/subuid
RUN echo skopeo:100000:65536 > /etc/subgid

# Point to the authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json

# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]
54  contrib/skopeoimage/upstream/Dockerfile  Normal file
@@ -0,0 +1,54 @@
# upstream/Dockerfile
#
# Build a Skopeo container image from the latest
# upstream version of Skopeo on GitHub.
# https://github.com/containers/skopeo
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:33

# Don't include container-selinux and remove
# directories used by yum that are just taking
# up space. Also reinstall shadow-utils as without
# doing so, the setuid/setgid bits on newuidmap
# and newgidmap are lost in the Fedora images.
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; \
    yum -y install make \
    golang \
    git \
    go-md2man \
    fuse-overlayfs \
    fuse3 \
    containers-common \
    gpgme-devel \
    libassuan-devel \
    btrfs-progs-devel \
    device-mapper-devel --enablerepo updates-testing --exclude container-selinux; \
    mkdir /root/skopeo; \
    git clone https://github.com/containers/skopeo /root/skopeo/src/github.com/containers/skopeo; \
    export GOPATH=/root/skopeo; \
    cd /root/skopeo/src/github.com/containers/skopeo; \
    make bin/skopeo; \
    make PREFIX=/usr install; \
    rm -rf /root/skopeo/*; \
    yum -y remove git golang go-md2man make; \
    yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*

# Adjust storage.conf to enable Fuse storage.
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf

# Set up the ability to use additional stores
# with this container image.
RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers; touch /var/lib/shared/overlay-images/images.lock; touch /var/lib/shared/overlay-layers/layers.lock

# Set up skopeo's uid/gid entries
RUN echo skopeo:100000:65536 > /etc/subuid
RUN echo skopeo:100000:65536 > /etc/subgid

# Point to the authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json

# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]
14  default-policy.json  Normal file
@@ -0,0 +1,14 @@
{
    "default": [
        {
            "type": "insecureAcceptAnything"
        }
    ],
    "transports":
        {
            "docker-daemon":
                {
                    "": [{"type":"insecureAcceptAnything"}]
                }
        }
}
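
This permissive default policy accepts any image. It can also be passed to skopeo explicitly instead of being installed at /etc/containers/policy.json; a sketch, with the file path and image names being illustrative:

```sh
# Use the permissive policy for a one-off copy without touching the
# system-wide /etc/containers/policy.json.
skopeo --policy ./default-policy.json copy \
  docker://quay.io/skopeo/stable:latest dir:/tmp/skopeo-image
```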
26  default.yaml  Normal file
@@ -0,0 +1,26 @@
# This is a default registries.d configuration file. You may
# add to this file or create additional files in registries.d/.
#
# sigstore: indicates a location that is read and write
# sigstore-staging: indicates a location that is only for write
#
# sigstore and sigstore-staging take a value of the following:
#  sigstore: {schema}://location
#
# For reading signatures, schema may be http, https, or file.
# For writing signatures, schema may only be file.

# This is the default signature write location for docker registries.
default-docker:
#  sigstore: file:///var/lib/containers/sigstore
  sigstore-staging: file:///var/lib/containers/sigstore

# The 'docker' indicator here is the start of the configuration
# for docker registries.
#
# docker:
#
#   privateregistry.com:
#     sigstore: http://privateregistry.com/sigstore/
#     sigstore-staging: /mnt/nfs/privateregistry/sigstore
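
With the defaults above, signatures created while pushing to a docker registry are written under the `sigstore-staging` location. A sketch reusing the signing example from skopeo-copy(1); the key and image names are illustrative and a matching GPG key is assumed to exist:

```sh
# Sign while copying; the signature is written to
# /var/lib/containers/sigstore per the default-docker entry above.
skopeo copy --sign-by dev@example.com \
  containers-storage:example/busybox:streaming \
  docker://registry.example.com/busybox:signed
```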
152  docs/skopeo-copy.1.md  Normal file
@@ -0,0 +1,152 @@
% skopeo-copy(1)

## NAME
skopeo\-copy - Copy an image (manifest, filesystem layers, signatures) from one location to another.

## SYNOPSIS
**skopeo copy** [**--sign-by=**_key-ID_] _source-image destination-image_

## DESCRIPTION
Copy an image (manifest, filesystem layers, signatures) from one location to another.

Uses the system's trust policy to validate images and rejects images not trusted by the policy.

_source-image_ use the "image name" format described above

_destination-image_ use the "image name" format described above

_source-image_ and _destination-image_ are interpreted completely independently; e.g. the destination name does not
automatically inherit any parts of the source name.

## OPTIONS

**--all**

If _source-image_ refers to a list of images, instead of copying just the image which matches the current OS and
architecture (subject to the use of the global --override-os, --override-arch and --override-variant options), attempt to copy all of
the images in the list, and the list itself. See the example near the end of this page.

**--authfile** _path_

Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
environment variable. `export REGISTRY_AUTH_FILE=path`

**--src-authfile** _path_

Path of the authentication file for the source registry. Uses path given by `--authfile`, if not provided.

**--dest-authfile** _path_

Path of the authentication file for the destination registry. Uses path given by `--authfile`, if not provided.

**--format, -f** _manifest-type_ Manifest type (oci, v2s1, or v2s2) to use when saving an image to a directory using the 'dir:' transport (default is the manifest type of the source).

**--quiet, -q** suppress output information when copying images

**--remove-signatures** do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.

**--sign-by=**_key-id_ add a signature using that key ID for an image name corresponding to _destination-image_. Existing signatures, if any, are preserved as well.

**--encryption-key** _protocol:keyfile_ specifies the encryption protocol, which can be JWE (RFC 7516), PGP (RFC 4880), or PKCS7 (RFC 2315), and the key material required for image encryption. For instance, jwe:/path/to/key.pem, pgp:admin@example.com, or pkcs7:/path/to/x509-file.

**--decryption-key** _key[:passphrase]_ to be used for decryption of images. Key can point to keys and/or certificates. Decryption will be tried with all keys. If the key is protected by a passphrase, the passphrase must be passed in the argument; otherwise it must be omitted.

**--src-creds** _username[:password]_ for accessing the source registry.

**--dest-compress** _bool-value_ Compress tarball image layers when saving to a directory using the 'dir' transport. (default is the same compression type as the source).

**--dest-oci-accept-uncompressed-layers** _bool-value_ Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed).

**--dest-creds** _username[:password]_ for accessing the destination registry.

**--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon.

**--src-no-creds** _bool-value_ Access the registry anonymously.

**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to the container source registry or daemon (defaults to true).

**--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon.

**--dest-no-creds** _bool-value_ Access the registry anonymously.

**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to the container destination registry or daemon (defaults to true).

**--src-daemon-host** _host_ Copy from docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).

**--dest-daemon-host** _host_ Copy to docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).

**--dest-compress-format** _format_ Specifies the compression format to use. Supported values are: `gzip` and `zstd`.

**--dest-compress-level** _level_ Specifies the compression level to use. The value is specific to the compression algorithm used, e.g. for zstd the accepted values are in the range 1-20 (inclusive), while for gzip it is 1-9 (inclusive).

**--src-registry-token** _Bearer token_ for accessing the source registry.

**--dest-registry-token** _Bearer token_ for accessing the destination registry.

## EXAMPLES

To just copy an image from one registry to another:
```sh
$ skopeo copy docker://quay.io/skopeo/stable:latest docker://registry.example.com/skopeo:latest
```

To copy the layers of the docker.io busybox image to a local directory:
```sh
$ mkdir -p /var/lib/images/busybox
$ skopeo copy docker://busybox:latest dir:/var/lib/images/busybox
$ ls /var/lib/images/busybox/*
/var/lib/images/busybox/2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749.tar
/var/lib/images/busybox/manifest.json
/var/lib/images/busybox/8ddc19f16526912237dd8af81971d5e4dd0587907234be2b83e249518d5b673f.tar
```

To copy and sign an image:

```sh
# skopeo copy --sign-by dev@example.com containers-storage:example/busybox:streaming docker://example/busybox:gold
```

To encrypt an image:
```sh
skopeo copy docker://docker.io/library/nginx:1.17.8 oci:local_nginx:1.17.8

openssl genrsa -out private.key 1024
openssl rsa -in private.key -pubout > public.key

skopeo copy --encryption-key jwe:./public.key oci:local_nginx:1.17.8 oci:try-encrypt:encrypted
```

To decrypt an image:
```sh
skopeo copy --decryption-key ./private.key oci:try-encrypt:encrypted oci:try-decrypt:decrypted
```

To copy an encrypted image without decrypting it:
```sh
skopeo copy oci:try-encrypt:encrypted oci:try-encrypt-copy:encrypted
```

To decrypt an image that requires more than one key:
```sh
skopeo copy --decryption-key ./private1.key --decryption-key ./private2.key --decryption-key ./private3.key oci:try-encrypt:encrypted oci:try-decrypt:decrypted
```

Container images can also be partially encrypted by specifying the index of the layer. Layer indices are 0-based, with support for negative indexing; i.e. 0 is the first layer and -1 is the last layer.

For example, if the image `docker.io/library/nginx:1.17.8` is made up of 3 layers and we only want to encrypt the 2nd layer:
```sh
skopeo copy --encryption-key jwe:./public.key --encrypt-layer 1 oci:local_nginx:1.17.8 oci:try-encrypt:encrypted
```
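
To copy a multi-arch image list with **--all** (all architectures plus the list itself), as referenced above; the registry name is illustrative:
```sh
$ skopeo copy --all docker://docker.io/library/busybox:latest docker://registry.example.com/busybox:latest
```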

## SEE ALSO
skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5), containers-policy.json(5), containers-transports(5)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
54  docs/skopeo-delete.1.md  Normal file
@@ -0,0 +1,54 @@
% skopeo-delete(1)

## NAME
skopeo\-delete - Mark the _image-name_ for later deletion by the registry's garbage collector.

## SYNOPSIS
**skopeo delete** _image-name_

## DESCRIPTION
Mark _image-name_ for deletion. To release the allocated disk space, you must log in to the container registry server and run the container registry garbage collector. E.g.,

```sh
/usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
```

Note: sometimes the config.yml is stored in /etc/docker/registry/config.yml

If you are running the container registry inside of a container, you would execute something like:

```sh
$ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
```

Additionally, the registry must allow deletions by setting `REGISTRY_STORAGE_DELETE_ENABLED=true` for the registry daemon.

## OPTIONS

**--authfile** _path_

Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--creds** _username[:password]_ for accessing the registry.

**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry.

**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true).

**--no-creds** _bool-value_ Access the registry anonymously.

**--registry-token** _Bearer token_ for accessing the registry.

## EXAMPLES

Mark the image example/pause for deletion from the registry.example.com registry:
```sh
$ skopeo delete docker://registry.example.com/example/pause:latest
```
See above for additional details on using the command **delete**.
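
For reference, the delete API is disabled by default in the docker/distribution registry. A sketch of starting a registry container with deletion enabled; the image tag, container name, and port are illustrative:
```sh
# REGISTRY_STORAGE_DELETE_ENABLED corresponds to the storage.delete.enabled
# setting in the registry's config.yml.
docker run -d -p 5000:5000 --name registry \
  -e REGISTRY_STORAGE_DELETE_ENABLED=true \
  registry:2
```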

## SEE ALSO
skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
104  docs/skopeo-inspect.1.md  Normal file
@@ -0,0 +1,104 @@
% skopeo-inspect(1)

## NAME
skopeo\-inspect - Return low-level information about _image-name_ in a registry.

## SYNOPSIS
**skopeo inspect** [*options*] _image-name_

## DESCRIPTION

Return low-level information about _image-name_ in a registry

_image-name_ name of image to retrieve information about

## OPTIONS

**--authfile** _path_

Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--cert-dir** _path_

Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry.

**--config**

Output the image's configuration blob in OCI format; the default is to output the inspect information in JSON format.

**--creds** _username[:password]_

Username and password for accessing the registry.

**--format**, **-f**=*format*

Format the output using the given Go template.
The keys of the returned JSON can be used as the values for the --format flag (see examples below).

**--no-creds**

Access the registry anonymously.

**--raw**

Output the raw manifest or config data, depending on the --config option.
The --format option is not supported with the --raw option.

**--registry-token** _Bearer token_

Registry token for accessing the registry.

**--retry-times**

The number of times to retry; the retry wait time is increased exponentially with the number of failed attempts.

**--tls-verify**

Require HTTPS and verify certificates when talking to container registries (defaults to true).

## EXAMPLES

To review information for the image fedora from the docker.io registry:
```sh
$ skopeo inspect docker://docker.io/fedora
{
    "Name": "docker.io/library/fedora",
    "Digest": "sha256:a97914edb6ba15deb5c5acf87bd6bd5b6b0408c96f48a5cbd450b5b04509bb7d",
    "RepoTags": [
        "20",
        "21",
        "22",
        "23",
        "24",
        "heisenbug",
        "latest",
        "rawhide"
    ],
    "Created": "2016-06-20T19:33:43.220526898Z",
    "DockerVersion": "1.10.3",
    "Labels": {},
    "Architecture": "amd64",
    "Os": "linux",
    "Layers": [
        "sha256:7c91a140e7a1025c3bc3aace4c80c0d9933ac4ee24b8630a6b0b5d8b9ce6b9d4"
    ]
}
```

```sh
$ /bin/skopeo inspect --config docker://registry.fedoraproject.org/fedora --format "{{ .Architecture }}"
amd64
```

```sh
$ /bin/skopeo inspect --format '{{ .Env }}' docker://registry.access.redhat.com/ubi8
[PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin container=oci]
```
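
To fetch the unmodified manifest bytes with **--raw**, as described above; piping to `jq` is optional and the image name is illustrative:
```sh
$ skopeo inspect --raw docker://registry.fedoraproject.org/fedora:latest | jq .
```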

## SEE ALSO
skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
104  docs/skopeo-list-tags.1.md  Normal file
@@ -0,0 +1,104 @@
% skopeo-list-tags(1)

## NAME
skopeo\-list\-tags - Return a list of tags for the transport-specific image repository.

## SYNOPSIS
**skopeo list-tags** _repository-name_

## DESCRIPTION

Return a list of tags from _repository-name_ in a registry.

_repository-name_ name of repository to retrieve tag listing from

## OPTIONS

**--authfile** _path_

Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--creds** _username[:password]_ for accessing the registry.

**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry.

**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true).

**--no-creds** _bool-value_ Access the registry anonymously.

**--registry-token** _Bearer token_ for accessing the registry.

## REPOSITORY NAMES

Repository names are transport-specific references, as each transport may have its own concept of a "repository" and "tags". Currently, only the Docker transport is supported.

This command refers to repositories using a _transport_`:`_details_ format. The following formats are supported:

**docker://**_docker-repository-reference_
A repository in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in either `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `skopeo login`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `docker login`.
A _docker-repository-reference_ is of the form: **registryhost:port/repositoryname**, which is similar to an _image-reference_ but with no tag or digest allowed as the last component (e.g. no `:latest` or `@sha256:xyz`).

Examples of valid docker-repository-references:
"docker.io/myuser/myrepo"
"docker.io/nginx"
"docker.io/library/fedora"
"localhost:5000/myrepository"

Examples of invalid references:
"docker.io/nginx:latest"
"docker.io/myuser/myimage:v1.0"
"docker.io/myuser/myimage@sha256:f48c4cc192f4c3c6a069cb5cca6d0a9e34d6076ba7c214fd0cc3ca60e0af76bb"

## EXAMPLES

### Docker Transport
To get the list of tags in the "fedora" repository from the docker.io registry (the repository name expands to "library/fedora" per the docker transport's canonical form):
```sh
$ skopeo list-tags docker://docker.io/fedora
{
    "Repository": "docker.io/library/fedora",
    "Tags": [
        "20",
        "21",
        "22",
        "23",
        "24",
        "25",
        "26-modular",
        "26",
        "27",
        "28",
        "29",
        "30",
        "31",
        "32",
        "branched",
        "heisenbug",
        "latest",
        "modular",
        "rawhide"
    ]
}
```

To list the tags in a local host docker/distribution registry on port 5000, in this case for the "fedora" repository:

```sh
$ skopeo list-tags docker://localhost:5000/fedora
{
    "Repository": "localhost:5000/fedora",
    "Tags": [
        "latest",
        "30",
        "31"
    ]
}
```

## SEE ALSO
skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5)

## AUTHORS

Zach Hill <zach@anchore.com>
101  docs/skopeo-login.1.md  Normal file
@@ -0,0 +1,101 @@
% skopeo-login(1)

## NAME
skopeo\-login - Login to a container registry.

## SYNOPSIS
**skopeo login** [*options*] *registry*

## DESCRIPTION
**skopeo login** logs into a specified registry server with the correct username
and password. **skopeo login** reads in the username and password from STDIN.
The username and password can also be set using the **username** and **password** flags.
The path of the authentication file can be specified by the user by setting the **authfile**
flag. The default path used is **${XDG\_RUNTIME\_DIR}/containers/auth.json**.

## OPTIONS

**--password**, **-p**=*password*

Password for registry

**--password-stdin**

Take the password from stdin

**--username**, **-u**=*username*

Username for registry

**--authfile**=*path*

Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json

Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
environment variable. `export REGISTRY_AUTH_FILE=path`

**--get-login**

Return the logged-in user for the registry. Return an error if no login is found.

**--cert-dir**=*path*

Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
The default certificates directory is _/etc/containers/certs.d_.

**--tls-verify**=*true|false*

Require HTTPS and verify certificates when contacting registries (default: true). If explicitly set to true,
then TLS verification will be used. If set to false, then TLS verification will not be used. If not specified,
TLS verification will be used unless the target registry is listed as an insecure registry in registries.conf.

**--help**, **-h**

Print usage statement

## EXAMPLES

```
$ skopeo login docker.io
Username: testuser
Password:
Login Succeeded!
```

```
$ skopeo login -u testuser -p testpassword localhost:5000
Login Succeeded!
```

```
$ skopeo login --authfile authdir/myauths.json docker.io
Username: testuser
Password:
Login Succeeded!
```

```
$ skopeo login --tls-verify=false -u test -p test localhost:5000
Login Succeeded!
```

```
$ skopeo login --cert-dir /etc/containers/certs.d/ -u foo -p bar localhost:5000
Login Succeeded!
```

```
$ skopeo login -u testuser --password-stdin < testpassword.txt docker.io
Login Succeeded!
```

```
$ echo $testpassword | skopeo login -u testuser --password-stdin docker.io
Login Succeeded!
```

## SEE ALSO
skopeo(1), skopeo-logout(1), containers-auth.json(5), containers-registries.conf(5), containers-certs.d(5)

## HISTORY
May 2020, Originally compiled by Qi Wang <qiwan@redhat.com>
53  docs/skopeo-logout.1.md  Normal file
@@ -0,0 +1,53 @@
% skopeo-logout(1)

## NAME
skopeo\-logout - Logout of a container registry.

## SYNOPSIS
**skopeo logout** [*options*] *registry*

## DESCRIPTION
**skopeo logout** logs out of a specified registry server by deleting the cached credentials
stored in the **auth.json** file. The path of the authentication file can be overridden by the user by setting the **authfile** flag.
The default path used is **${XDG\_RUNTIME\_DIR}/containers/auth.json**.
All the cached credentials can be removed by setting the **all** flag.

## OPTIONS

**--authfile**=*path*

Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json

Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
environment variable. `export REGISTRY_AUTH_FILE=path`

**--all**, **-a**

Remove the cached credentials for all registries in the auth file

**--help**, **-h**

Print usage statement

## EXAMPLES

```
$ skopeo logout docker.io
Remove login credentials for docker.io
```

```
$ skopeo logout --authfile authdir/myauths.json docker.io
Remove login credentials for docker.io
```

```
$ skopeo logout --all
Remove login credentials for all registries
```

## SEE ALSO
skopeo(1), skopeo-login(1), containers-auth.json(5)

## HISTORY
May 2020, Originally compiled by Qi Wang <qiwan@redhat.com>
26  docs/skopeo-manifest-digest.1.md  Normal file
@@ -0,0 +1,26 @@
% skopeo-manifest-digest(1)

## NAME
skopeo\-manifest\-digest - Compute a manifest digest for a manifest-file and write it to standard output.

## SYNOPSIS
**skopeo manifest-digest** _manifest-file_

## DESCRIPTION

Compute a manifest digest of _manifest-file_ and write it to standard output.

## EXAMPLES

```sh
$ skopeo manifest-digest manifest.json
sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6
```
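
A manifest obtained with `skopeo inspect --raw` can be fed straight to this command; a sketch, with the image name being illustrative:
```sh
$ skopeo inspect --raw docker://registry.fedoraproject.org/fedora:latest > manifest.json
$ skopeo manifest-digest manifest.json
```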

## SEE ALSO
skopeo(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
34  docs/skopeo-standalone-sign.1.md  Normal file
@@ -0,0 +1,34 @@
% skopeo-standalone-sign(1)

## NAME
skopeo\-standalone\-sign - Debugging tool - Sign an image manifest using local files.

## SYNOPSIS
**skopeo standalone-sign** _manifest docker-reference key-fingerprint_ **--output**|**-o** _signature_

## DESCRIPTION
This is primarily a debugging tool, useful for special cases,
and usually should not be a part of your normal operational workflow; use `skopeo copy --sign-by` instead to publish and sign an image in one step.

_manifest_ Path to a file containing the image manifest

_docker-reference_ A docker reference to identify the image with

_key-fingerprint_ Key identity to use for signing

**--output**|**-o** output file

## EXAMPLES

```sh
$ skopeo standalone-sign busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 --output busybox.signature
```

## SEE ALSO
skopeo(1), skopeo-copy(1), containers-signature(5)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
36  docs/skopeo-standalone-verify.1.md  Normal file
@@ -0,0 +1,36 @@
% skopeo-standalone-verify(1)

## NAME
skopeo\-standalone\-verify - Verify an image signature.

## SYNOPSIS
**skopeo standalone-verify** _manifest docker-reference key-fingerprint signature_

## DESCRIPTION

Verify a signature using local files; the image digest is printed on success.

_manifest_ Path to a file containing the image manifest

_docker-reference_ A docker reference expected to identify the image in the signature

_key-fingerprint_ Expected identity of the signing key

_signature_ Path to the signature file

**Note:** If you do use this, make sure that the image cannot be changed at the source location between the time of its verification and its use.

## EXAMPLES

```sh
$ skopeo standalone-verify busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 busybox.signature
Signature verified, digest sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55
```

## SEE ALSO
skopeo(1), containers-signature(5)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
190  docs/skopeo-sync.1.md  Normal file
@@ -0,0 +1,190 @@
% skopeo-sync(1)

## NAME
skopeo\-sync - Synchronize images between container registries and local directories.

## SYNOPSIS
**skopeo sync** --src _transport_ --dest _transport_ _source_ _destination_

## DESCRIPTION
Synchronize images between container registries and local directories.
The synchronization is achieved by copying all the images found at _source_ to _destination_.

This is useful for synchronizing a local container registry mirror, and for populating registries running inside of air-gapped environments.

Unlike other skopeo commands, skopeo sync requires both the source and destination transports to be specified separately from _source_ and _destination_.
One of the problems of prefixing a destination with its transport is that the registry `docker://hostname:port` would be wrongly interpreted as an image reference at a non-fully-qualified registry, with `hostname` and `port` taken as the image name and tag.

Available _source_ transports:
- _docker_ (i.e. `--src docker`): _source_ is a repository hosted on a container registry (e.g.: `registry.example.com/busybox`).
 If no image tag is specified, skopeo sync copies all the tags found in that repository.
- _dir_ (i.e. `--src dir`): _source_ is a local directory path (e.g.: `/media/usb/`). Refer to skopeo(1) **dir:**_path_ for the local image format.
- _yaml_ (i.e. `--src yaml`): _source_ is a local YAML file path.
 The YAML file should specify the list of images copied from different container registries (local directories are not supported). Refer to EXAMPLES for the file format.

Available _destination_ transports:
- _docker_ (i.e. `--dest docker`): _destination_ is a container registry (e.g.: `my-registry.local.lan`).
- _dir_ (i.e. `--dest dir`): _destination_ is a local directory path (e.g.: `/media/usb/`).
 One directory per source 'image:tag' is created for each copied image.

When the `--scoped` option is specified, images are prefixed with the source image path so that multiple images with the same
name can be stored at _destination_.

## OPTIONS
**--all**
If one of the images in __src__ refers to a list of images, instead of copying just the image which matches the current OS and
architecture (subject to the use of the global --override-os, --override-arch and --override-variant options), attempt to copy all of
the images in the list, and the list itself.

**--authfile** _path_

Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--src-authfile** _path_

Path of the authentication file for the source registry. Uses path given by `--authfile`, if not provided.

**--dest-authfile** _path_

Path of the authentication file for the destination registry. Uses path given by `--authfile`, if not provided.

**--src** _transport_ Transport for the source repository.

**--dest** _transport_ Destination transport.

**--scoped** Prefix images with the source image path, so that multiple images with the same name can be stored at _destination_.

**--remove-signatures** Do not copy signatures, if any, from _source-image_. This is necessary when copying a signed image to a destination which does not support signatures.

**--sign-by=**_key-id_ Add a signature using that key ID for an image name corresponding to _destination-image_.

**--src-creds** _username[:password]_ for accessing the source registry.

**--dest-creds** _username[:password]_ for accessing the destination registry.

**--src-cert-dir** _path_ Use certificates (*.crt, *.cert, *.key) at _path_ to connect to the source registry or daemon.

**--src-no-creds** _bool-value_ Access the registry anonymously.

**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to a container source registry or daemon (defaults to true).

**--dest-cert-dir** _path_ Use certificates (*.crt, *.cert, *.key) at _path_ to connect to the destination registry or daemon.

**--dest-no-creds** _bool-value_ Access the registry anonymously.

**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to a container destination registry or daemon (defaults to true).

**--src-registry-token** _Bearer token_ for accessing the source registry.

**--dest-registry-token** _Bearer token_ for accessing the destination registry.

## EXAMPLES

### Synchronizing to a local directory
```
$ skopeo sync --src docker --dest dir registry.example.com/busybox /media/usb
```
Images are located at:
```
/media/usb/busybox:1-glibc
/media/usb/busybox:1-musl
/media/usb/busybox:1-ubuntu
...
/media/usb/busybox:latest
```

### Synchronizing to a container registry from local
Images are located at:
```
/media/usb/busybox:1-glibc
```
Sync run:
```
$ skopeo sync --src dir --dest docker /media/usb/busybox:1-glibc my-registry.local.lan/test/
```
Destination registry content:
```
REPO                                 TAGS
my-registry.local.lan/test/busybox   1-glibc
```

### Synchronizing to a local directory, scoped
```
$ skopeo sync --src docker --dest dir --scoped registry.example.com/busybox /media/usb
```
Images are located at:
```
/media/usb/registry.example.com/busybox:1-glibc
/media/usb/registry.example.com/busybox:1-musl
/media/usb/registry.example.com/busybox:1-ubuntu
...
/media/usb/registry.example.com/busybox:latest
```

### Synchronizing to a container registry
```
skopeo sync --src docker --dest docker registry.example.com/busybox my-registry.local.lan
```
Destination registry content:
```
REPO                         TAGS
registry.local.lan/busybox   1-glibc, 1-musl, 1-ubuntu, ..., latest
```

### Synchronizing to a container registry keeping the repository
```
skopeo sync --src docker --dest docker registry.example.com/repo/busybox my-registry.local.lan/repo
```
Destination registry content:
```
REPO                              TAGS
registry.local.lan/repo/busybox   1-glibc, 1-musl, 1-ubuntu, ..., latest
```

### YAML file content (used as _source_ with `--src yaml`)

```yaml
registry.example.com:
    images:
        busybox: []
        redis:
            - "1.0"
            - "2.0"
            - "sha256:0000000000000000000000000000000011111111111111111111111111111111"
    images-by-tag-regex:
        nginx: ^1\.13\.[12]-alpine-perl$
    credentials:
        username: john
        password: this is a secret
    tls-verify: true
    cert-dir: /home/john/certs
quay.io:
    tls-verify: false
    images:
        coreos/etcd:
            - latest
```
If the yaml filename is `sync.yml`, the sync run is:
```
skopeo sync --src yaml --dest docker sync.yml my-registry.local.lan/repo/
```
This will copy the following images:
- Repository `registry.example.com/busybox`: all images, as no tags are specified.
- Repository `registry.example.com/redis`: images tagged "1.0" and "2.0", along with the image with digest "sha256:0000000000000000000000000000000011111111111111111111111111111111".
- Repository `registry.example.com/nginx`: images tagged "1.13.1-alpine-perl" and "1.13.2-alpine-perl".
- Repository `quay.io/coreos/etcd`: images tagged "latest".

For the registry `registry.example.com`, the "john"/"this is a secret" credentials are used, with server TLS certificates located at `/home/john/certs`.

TLS verification is normally enabled, and it can be disabled by setting `tls-verify` to `false`.
In the above example, TLS verification is enabled for `registry.example.com`, while it is
disabled for `quay.io`.

## SEE ALSO
skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5), containers-policy.json(5), containers-transports(5)

## AUTHORS

Flavio Castelli <fcastelli@suse.com>, Marco Vedovati <mvedovati@suse.com>
105  docs/skopeo.1.md  Normal file
@@ -0,0 +1,105 @@
% SKOPEO(1) Skopeo Man Pages
% Jhon Honce
% August 2016
## NAME
skopeo -- Command line utility used to interact with local and remote container images and container image registries

## SYNOPSIS
**skopeo** [_global options_] _command_ [_command options_]

## DESCRIPTION
`skopeo` is a command line utility providing various operations with container images and container image registries.

`skopeo` can copy container images between various container image stores, converting them as necessary. For example you can use `skopeo` to copy container images from one container registry to another.

`skopeo` can convert a Docker schema 2 or schema 1 container image to an OCI image.

`skopeo` can inspect a repository on a container registry without needlessly pulling the image. Pulling an image from a repository, especially a remote repository, is an expensive network and storage operation. Skopeo fetches the repository's manifest and displays a `docker inspect`-like JSON output about the repository or a tag. `skopeo`, in contrast to `docker inspect`, helps you gather useful information about a repository or a tag without requiring you to run `docker pull` - e.g. - Which tags are available for the given repository? Which labels does the image have?

`skopeo` can sign and verify container images.

`skopeo` can delete container images from a remote container registry.

Note: `skopeo` does not require any container runtime to be running for most of
its functionality. It also does not require root, unless you are copying images into a container runtime storage backend, like the docker daemon or github.com/containers/storage.

## IMAGE NAMES
Most commands refer to container images, using a _transport_`:`_details_ format. The following formats are supported:

**containers-storage:**_docker-reference_
An image located in a local containers/storage image store. Both the location and image store are specified in /etc/containers/storage.conf. (Backend for Podman, CRI-O, Buildah and friends)

**dir:**_path_
An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.

**docker://**_docker-reference_
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in either `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `skopeo login`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `docker login`.

**docker-archive:**_path_[**:**_docker-reference_]
An image stored in a `docker save`-formatted file. _docker-reference_ is only used when creating such a file, and it must not contain a digest.

**docker-daemon:**_docker-reference_
An image _docker-reference_ stored in the docker daemon's internal storage. _docker-reference_ must contain either a tag or a digest. Alternatively, when reading images, the format can be docker-daemon:algo:digest (an image ID).

**oci:**_path_**:**_tag_
An image _tag_ in a directory compliant with the "Open Container Image Layout Specification" at _path_.

**oci-archive:**_path_**:**_tag_
An image _tag_ in a tar archive compliant with the "Open Container Image Layout Specification" at _path_.

See [containers-transports(5)](https://github.com/containers/image/blob/master/docs/containers-transports.5.md) for details.
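
To illustrate the transport syntax, a couple of copies between different stores; the image names and paths are illustrative:

```sh
# Registry to a local OCI layout directory
skopeo copy docker://registry.fedoraproject.org/fedora:latest oci:/tmp/fedora:latest

# docker-daemon store to a plain directory dump
skopeo copy docker-daemon:busybox:latest dir:/tmp/busybox
```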

## OPTIONS

**--command-timeout** _duration_ Timeout for the command execution.

**--debug** enable debug output

**--help**|**-h** Show help

**--insecure-policy** Adopt an insecure, permissive policy that allows anything. This obviates the need for a policy file.

**--override-arch** _arch_ Use _arch_ instead of the architecture of the machine for choosing images.

**--override-os** _OS_ Use _OS_ instead of the running OS for choosing images.

**--override-variant** _VARIANT_ Use _VARIANT_ instead of the running architecture variant for choosing images.

**--policy** _path-to-policy_ Path to a policy.json file to use for verifying signatures and deciding whether an image is trusted, overriding the default trust policy file.

**--registries.d** _dir_ Use registry configuration files in _dir_ (e.g. for container signature storage), overriding the default path.

**--tmpdir** _dir_ Directory used to store temporary files. Defaults to /var/tmp.

**--version**|**-v** print the version number

## COMMANDS

| Command | Description |
| ----------------------------------------- | ------------------------------------------------------------------------------ |
| [skopeo-copy(1)](skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
| [skopeo-delete(1)](skopeo-delete.1.md) | Mark image-name for deletion. |
| [skopeo-inspect(1)](skopeo-inspect.1.md) | Return low-level information about image-name in a registry. |
| [skopeo-list-tags(1)](skopeo-list-tags.1.md) | List the tags for the given transport/repository. |
| [skopeo-login(1)](skopeo-login.1.md) | Login to a container registry. |
| [skopeo-logout(1)](skopeo-logout.1.md) | Logout of a container registry. |
| [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest of manifest-file and write it to standard output. |
| [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Sign an image. |
| [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md) | Verify an image. |
| [skopeo-sync(1)](skopeo-sync.1.md) | Copy images from one or more repositories to a user-specified destination. |

## FILES
**/etc/containers/policy.json**
Default trust policy file, if **--policy** is not specified.
The policy format is documented in [containers-policy.json(5)](https://github.com/containers/image/blob/master/docs/containers-policy.json.5.md).

**/etc/containers/registries.d**
Default directory containing registry configuration, if **--registries.d** is not specified.
The contents of this directory are documented in [containers-registries.d(5)](https://github.com/containers/image/blob/master/docs/containers-registries.d.5.md).

## SEE ALSO
skopeo-login(1), docker-login(1), containers-auth.json(5), containers-storage.conf(5), containers-policy.json(5), containers-transports(5)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
546
docs/skopeo.svg
Normal file
546
docs/skopeo.svg
Normal file
@@ -0,0 +1,546 @@
[docs/skopeo.svg: new 546-line SVG image of the skopeo logo, created with Inkscape (24 KiB); the raw XML/path markup is not reproduced here.]
26
go.mod
Normal file
@@ -0,0 +1,26 @@
module github.com/containers/skopeo

go 1.12

require (
	github.com/containers/common v0.36.0
	github.com/containers/image/v5 v5.11.0
	github.com/containers/ocicrypt v1.1.1
	github.com/containers/storage v1.29.0
	github.com/docker/docker v17.12.0-ce-rc1.0.20201020191947-73dc6a680cdd+incompatible
	github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
	github.com/go-check/check v0.0.0-20180628173108-788fd7840127
	github.com/moby/term v0.0.0-20201110203204-bea5bbe245bf // indirect
	github.com/opencontainers/go-digest v1.0.0
	github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
	github.com/opencontainers/image-tools v0.0.0-20170926011501-6d941547fa1d
	github.com/pkg/errors v0.9.1
	github.com/russross/blackfriday v2.0.0+incompatible // indirect
	github.com/sirupsen/logrus v1.8.1
	github.com/spf13/cobra v1.1.3
	github.com/spf13/pflag v1.0.5
	github.com/stretchr/testify v1.7.0
	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
	go4.org v0.0.0-20190218023631-ce4c26f7be8e // indirect
	gopkg.in/yaml.v2 v2.4.0
)
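With the module definition above, a vendored build looks roughly like the sketch below; `./cmd/skopeo` is assumed to be the main-package path, and the `-mod=vendor` / `GO111MODULE` handling mirrors what hack/make.sh in this change does when module support is detected:

```console
$ export GO111MODULE=on                             # also set by hack/make.sh when "go help mod" succeeds
$ go mod vendor                                     # refresh vendor/ from go.mod
$ go build -mod=vendor -o bin/skopeo ./cmd/skopeo   # main-package path assumed here
```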
@@ -1,102 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
PROJECT=github.com/runcom/skopeo
|
||||
|
||||
# Downloads dependencies into vendor/ directory
|
||||
mkdir -p vendor
|
||||
|
||||
export GOPATH="$GOPATH:${PWD}/vendor"
|
||||
|
||||
find="/usr/bin/find"
|
||||
|
||||
clone() {
|
||||
local vcs="$1"
|
||||
local pkg="$2"
|
||||
local rev="$3"
|
||||
local url="$4"
|
||||
|
||||
: ${url:=https://$pkg}
|
||||
local target="vendor/src/$pkg"
|
||||
|
||||
echo -n "$pkg @ $rev: "
|
||||
|
||||
if [ -d "$target" ]; then
|
||||
echo -n 'rm old, '
|
||||
rm -rf "$target"
|
||||
fi
|
||||
|
||||
echo -n 'clone, '
|
||||
case "$vcs" in
|
||||
git)
|
||||
git clone --quiet --no-checkout "$url" "$target"
|
||||
( cd "$target" && git checkout --quiet "$rev" && git reset --quiet --hard "$rev" )
|
||||
;;
|
||||
hg)
|
||||
hg clone --quiet --updaterev "$rev" "$url" "$target"
|
||||
;;
|
||||
esac
|
||||
|
||||
echo -n 'rm VCS, '
|
||||
( cd "$target" && rm -rf .{git,hg} )
|
||||
|
||||
echo -n 'rm vendor, '
|
||||
( cd "$target" && rm -rf vendor Godeps/_workspace )
|
||||
|
||||
echo done
|
||||
}
|
||||
|
||||
clean() {
|
||||
local packages=(
|
||||
"${PROJECT}" # package main
|
||||
)
|
||||
local platforms=( linux/amd64 linux/386 )
|
||||
|
||||
local buildTags=( )
|
||||
|
||||
echo
|
||||
|
||||
echo -n 'collecting import graph, '
|
||||
local IFS=$'\n'
|
||||
local imports=( $(
|
||||
for platform in "${platforms[@]}"; do
|
||||
export GOOS="${platform%/*}";
|
||||
export GOARCH="${platform##*/}";
|
||||
go list -e -tags "$buildTags" -f '{{join .Deps "\n"}}' "${packages[@]}"
|
||||
go list -e -tags "$buildTags" -f '{{join .TestImports "\n"}}' "${packages[@]}"
|
||||
done | grep -vE "^${PROJECT}" | sort -u
|
||||
) )
|
||||
imports=( $(go list -e -f '{{if not .Standard}}{{.ImportPath}}{{end}}' "${imports[@]}") )
|
||||
unset IFS
|
||||
|
||||
echo -n 'pruning unused packages, '
|
||||
findArgs=(
|
||||
# This directory contains only .c and .h files which are necessary
|
||||
# -path vendor/src/github.com/mattn/go-sqlite3/code
|
||||
)
|
||||
for import in "${imports[@]}"; do
|
||||
[ "${#findArgs[@]}" -eq 0 ] || findArgs+=( -or )
|
||||
findArgs+=( -path "vendor/src/$import" )
|
||||
done
|
||||
local IFS=$'\n'
|
||||
local prune=( $($find vendor -depth -type d -not '(' "${findArgs[@]}" ')') )
|
||||
unset IFS
|
||||
for dir in "${prune[@]}"; do
|
||||
$find "$dir" -maxdepth 1 -not -type d -not -name 'LICENSE*' -not -name 'COPYING*' -exec rm -v -f '{}' ';'
|
||||
rmdir "$dir" 2>/dev/null || true
|
||||
done
|
||||
|
||||
echo -n 'pruning unused files, '
|
||||
$find vendor -type f -name '*_test.go' -exec rm -v '{}' ';'
|
||||
|
||||
echo done
|
||||
}
|
||||
|
||||
# Fix up hard-coded imports that refer to Godeps paths so they'll work with our vendoring
|
||||
fix_rewritten_imports () {
|
||||
local pkg="$1"
|
||||
local remove="${pkg}/Godeps/_workspace/src/"
|
||||
local target="vendor/src/$pkg"
|
||||
|
||||
echo "$pkg: fixing rewritten imports"
|
||||
$find "$target" -name \*.go -exec sed -i -e "s|\"${remove}|\"|g" {} \;
|
||||
}
|
||||
7
hack/btrfs_installed_tag.sh
Executable file
@@ -0,0 +1,7 @@
|
||||
#!/bin/bash
|
||||
cc -E - > /dev/null 2> /dev/null << EOF
|
||||
#include <btrfs/ioctl.h>
|
||||
EOF
|
||||
if test $? -ne 0 ; then
|
||||
echo exclude_graphdriver_btrfs
|
||||
fi
|
||||
7
hack/btrfs_tag.sh
Executable file
@@ -0,0 +1,7 @@
|
||||
#!/bin/bash
|
||||
cc -E - > /dev/null 2> /dev/null << EOF
|
||||
#include <btrfs/version.h>
|
||||
EOF
|
||||
if test $? -ne 0 ; then
|
||||
echo btrfs_noversion
|
||||
fi
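These probe scripts print a build-tag name only when the corresponding btrfs headers are missing; the output is meant to be collected into BUILDTAGS, which the hack/make targets later in this change pass through to the Go toolchain. A sketch (hack/libdm_tag.sh is the analogous device-mapper probe added below):

```console
$ BUILDTAGS="$(hack/btrfs_installed_tag.sh) $(hack/btrfs_tag.sh) $(hack/libdm_tag.sh)"
$ make bin/skopeo ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}   # same BUILDTAGS convention as hack/make/test-integration
```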
61
hack/get_ci_vm.sh
Executable file
@@ -0,0 +1,61 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
#
|
||||
# For help and usage information, simply execute the script w/o any arguments.
|
||||
#
|
||||
# This script is intended to be run by Red Hat skopeo developers who need
|
||||
# to debug problems specifically related to Cirrus-CI automated testing.
|
||||
# It requires that you have been granted prior access to create VMs in
|
||||
# google-cloud. For non-Red Hat contributors, VMs are available as-needed,
|
||||
# with supervision upon request.
|
||||
|
||||
set -e
|
||||
|
||||
SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
|
||||
SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
|
||||
REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")
|
||||
|
||||
# Help detect if we were called by get_ci_vm container
|
||||
GET_CI_VM="${GET_CI_VM:-0}"
|
||||
in_get_ci_vm() {
|
||||
if ((GET_CI_VM==0)); then
|
||||
echo "Error: $1 is not intended for use in this context"
|
||||
exit 2
|
||||
fi
|
||||
}
|
||||
|
||||
# get_ci_vm APIv1 container entrypoint calls into this script
|
||||
# to obtain the required repository-specific configuration options.
|
||||
if [[ "$1" == "--config" ]]; then
|
||||
in_get_ci_vm "$1"
|
||||
cat <<EOF
|
||||
DESTDIR="/var/tmp/go/src/github.com/containers/skopeo"
|
||||
UPSTREAM_REPO="https://github.com/containers/skopeo.git"
|
||||
GCLOUD_PROJECT="skopeo"
|
||||
GCLOUD_IMGPROJECT="libpod-218412"
|
||||
GCLOUD_CFG="skopeo"
|
||||
GCLOUD_ZONE="${GCLOUD_ZONE:-us-central1-f}"
|
||||
GCLOUD_CPUS="2"
|
||||
GCLOUD_MEMORY="4Gb"
|
||||
GCLOUD_DISK="200"
|
||||
EOF
|
||||
elif [[ "$1" == "--setup" ]]; then
|
||||
in_get_ci_vm "$1"
|
||||
# get_ci_vm container entrypoint calls us with this option on the
|
||||
# Cirrus-CI environment instance, to perform repository-specific setup.
|
||||
cd $REPO_DIRPATH
|
||||
echo "+ No further setup performed" > /dev/stderr
|
||||
else
|
||||
# Create and access VM for specified Cirrus-CI task
|
||||
mkdir -p $HOME/.config/gcloud/ssh
|
||||
podman run -it --rm \
|
||||
--tz=local \
|
||||
-e NAME="$USER" \
|
||||
-e SRCDIR=/src \
|
||||
-e GCLOUD_ZONE="$GCLOUD_ZONE" \
|
||||
-e DEBUG="${DEBUG:-0}" \
|
||||
-v $REPO_DIRPATH:/src:O \
|
||||
-v $HOME/.config/gcloud:/root/.config/gcloud:z \
|
||||
-v $HOME/.config/gcloud/ssh:/root/.ssh:z \
|
||||
quay.io/libpod/get_ci_vm:latest "$@"
|
||||
fi
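As the header comments note, running the script with no arguments prints the help text from the get_ci_vm container; passing a Cirrus-CI task name creates and connects to a matching VM. A sketch (the task name is purely illustrative):

```console
$ hack/get_ci_vm.sh                   # show usage/help from the container entrypoint
$ hack/get_ci_vm.sh "Test skopeo"     # hypothetical Cirrus-CI task name
```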
14
hack/libdm_tag.sh
Executable file
@@ -0,0 +1,14 @@
|
||||
#!/bin/bash
|
||||
tmpdir="$PWD/tmp.$RANDOM"
|
||||
mkdir -p "$tmpdir"
|
||||
trap 'rm -fr "$tmpdir"' EXIT
|
||||
cc -c -o "$tmpdir"/libdm_tag.o -x c - > /dev/null 2> /dev/null << EOF
|
||||
#include <libdevmapper.h>
|
||||
int main() {
|
||||
struct dm_task *task;
|
||||
return 0;
|
||||
}
|
||||
EOF
|
||||
if test $? -ne 0 ; then
|
||||
echo libdm_no_deferred_remove
|
||||
fi
|
||||
107
hack/make.sh
Executable file
@@ -0,0 +1,107 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
# This script builds various binaries from a checkout of the skopeo
|
||||
# source code.
|
||||
#
|
||||
# Requirements:
|
||||
# - The current directory should be a checkout of the skopeo source code
|
||||
# (https://github.com/containers/skopeo). Whatever version is checked out
|
||||
# will be built.
|
||||
# - The script is intended to be run inside the docker container specified
|
||||
# in the Dockerfile at the root of the source. In other words:
|
||||
# DO NOT CALL THIS SCRIPT DIRECTLY.
|
||||
# - The right way to call this script is to invoke "make" from
|
||||
# your checkout of the skopeo repository.
|
||||
# the Makefile will do a "docker build -t skopeo ." and then
|
||||
# "docker run hack/make.sh" in the resulting image.
|
||||
#
|
||||
|
||||
set -o pipefail
|
||||
|
||||
export SKOPEO_PKG='github.com/containers/skopeo'
|
||||
export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
export MAKEDIR="$SCRIPTDIR/make"
|
||||
|
||||
# We're a nice, sexy, little shell script, and people might try to run us;
|
||||
# but really, they shouldn't. We want to be in a container!
|
||||
# The magic value is defined inside our Dockerfile.
|
||||
if [[ "$container_magic" != "85531765-346b-4316-bdb8-358e4cca9e5d" ]]; then
|
||||
{
|
||||
echo "# WARNING! I don't seem to be running in a Docker container."
|
||||
echo "# The result of this command might be an incorrect build, and will not be"
|
||||
echo "# officially supported."
|
||||
echo "#"
|
||||
echo "# Try this instead: make all"
|
||||
echo "#"
|
||||
} >&2
|
||||
else
|
||||
echo "# I appear to be running inside my designated container image, good!"
|
||||
export SKOPEO_CONTAINER_TESTS=1
|
||||
fi
|
||||
|
||||
echo
|
||||
|
||||
# List of bundles to create when no argument is passed
|
||||
# TODO(runcom): these are the ones left over from Docker... for now
|
||||
# test-unit
|
||||
# validate-dco
|
||||
# cover
|
||||
DEFAULT_BUNDLES=(
|
||||
validate-gofmt
|
||||
validate-lint
|
||||
validate-vet
|
||||
validate-git-marks
|
||||
|
||||
test-integration
|
||||
)
|
||||
|
||||
TESTFLAGS+=" -test.timeout=15m"
|
||||
|
||||
# Go module support: set `-mod=vendor` to use the vendored sources
|
||||
# See also the top-level Makefile.
|
||||
mod_vendor=
|
||||
if go help mod >/dev/null 2>&1; then
|
||||
export GO111MODULE=on
|
||||
mod_vendor='-mod=vendor'
|
||||
fi
|
||||
|
||||
# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
|
||||
# You can use this to select certain tests to run, eg.
|
||||
#
|
||||
# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit
|
||||
#
|
||||
# For integration-cli test, we use [gocheck](https://labix.org/gocheck), if you want
|
||||
# to run certain tests on your local host, you should run with command:
|
||||
#
|
||||
# TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration-cli
|
||||
#
|
||||
go_test_dir() {
|
||||
dir=$1
|
||||
(
|
||||
echo '+ go test' $mod_vendor $TESTFLAGS ${BUILDTAGS:+-tags "$BUILDTAGS"} "${SKOPEO_PKG}${dir#.}"
|
||||
cd "$dir"
|
||||
export DEST="$ABS_DEST" # we're in a subshell, so this is safe -- our integration-cli tests need DEST, and "cd" screws it up
|
||||
go test $mod_vendor $TESTFLAGS ${BUILDTAGS:+-tags "$BUILDTAGS"}
|
||||
)
|
||||
}
|
||||
|
||||
bundle() {
|
||||
local bundle="$1"; shift
|
||||
echo "---> Making bundle: $(basename "$bundle")"
|
||||
source "$SCRIPTDIR/make/$bundle" "$@"
|
||||
}
|
||||
|
||||
main() {
|
||||
if [ $# -lt 1 ]; then
|
||||
bundles=(${DEFAULT_BUNDLES[@]})
|
||||
else
|
||||
bundles=($@)
|
||||
fi
|
||||
for bundle in ${bundles[@]}; do
|
||||
bundle "$bundle"
|
||||
echo
|
||||
done
|
||||
}
|
||||
|
||||
main "$@"
|
||||
31
hack/make/.validate
Normal file
@@ -0,0 +1,31 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ -z "$VALIDATE_UPSTREAM" ]; then
|
||||
# this is kind of an expensive check, so let's not do this twice if we
|
||||
# are running more than one validate bundle script
|
||||
|
||||
VALIDATE_REPO='https://github.com/containers/skopeo.git'
|
||||
VALIDATE_BRANCH='master'
|
||||
|
||||
if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
|
||||
VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git"
|
||||
VALIDATE_BRANCH="${TRAVIS_BRANCH}"
|
||||
fi
|
||||
|
||||
VALIDATE_HEAD="$(git rev-parse --verify HEAD)"
|
||||
|
||||
git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH"
|
||||
VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)"
|
||||
|
||||
VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD"
|
||||
VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD"
|
||||
|
||||
validate_diff() {
|
||||
git diff "$VALIDATE_UPSTREAM" "$@"
|
||||
}
|
||||
validate_log() {
|
||||
if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
|
||||
git log "$VALIDATE_COMMIT_LOG" "$@"
|
||||
fi
|
||||
}
|
||||
fi
|
||||
14
hack/make/test-integration
Executable file
@@ -0,0 +1,14 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
bundle_test_integration() {
|
||||
TESTFLAGS="$TESTFLAGS -check.v"
|
||||
go_test_dir ./integration
|
||||
}
|
||||
|
||||
# subshell so that we can export PATH without breaking other things
|
||||
(
|
||||
make bin/skopeo ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
|
||||
make PREFIX=/usr install
|
||||
bundle_test_integration
|
||||
) 2>&1
|
||||
18
hack/make/test-system
Executable file
@@ -0,0 +1,18 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
# Before running podman for the first time, make sure
# to set storage to vfs (not overlay): podman-in-podman
# doesn't work with overlay. Also disable mountopt,
# which causes errors with vfs.
|
||||
sed -i \
|
||||
-e 's/^driver\s*=.*/driver = "vfs"/' \
|
||||
-e 's/^mountopt/#mountopt/' \
|
||||
/etc/containers/storage.conf
|
||||
|
||||
# Build skopeo, install into /usr/bin
|
||||
make bin/skopeo ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
|
||||
make PREFIX=/usr install
|
||||
|
||||
# Run tests
|
||||
SKOPEO_BINARY=/usr/bin/skopeo bats --tap systemtest
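For local iteration it can be convenient to point the suite at a freshly built binary and run a single test file rather than the whole systemtest directory; a sketch, assuming a file such as systemtest/010-inspect.bats exists in the checkout:

```console
$ make bin/skopeo
$ SKOPEO_BINARY=$(pwd)/bin/skopeo bats --tap systemtest/010-inspect.bats   # file name is an assumed example
```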
44
hack/make/validate-git-marks
Executable file
@@ -0,0 +1,44 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
source "$(dirname "$BASH_SOURCE")/.validate"
|
||||
|
||||
# folders=$(find * -type d | egrep -v '^Godeps|bundles|.git')
|
||||
|
||||
IFS=$'\n'
|
||||
files=( $(validate_diff --diff-filter=ACMR --name-only -- '*' | grep -v '^vendor/' || true) )
|
||||
unset IFS
|
||||
|
||||
badFiles=()
|
||||
for f in "${files[@]}"; do
|
||||
if [ $(grep -r "^<<<<<<<" $f) ]; then
|
||||
badFiles+=( "$f" )
|
||||
continue
|
||||
fi
|
||||
|
||||
if [ $(grep -r "^>>>>>>>" $f) ]; then
|
||||
badFiles+=( "$f" )
|
||||
continue
|
||||
fi
|
||||
|
||||
if [ $(grep -r "^=======$" $f) ]; then
|
||||
badFiles+=( "$f" )
|
||||
continue
|
||||
fi
|
||||
set -e
|
||||
done
|
||||
|
||||
|
||||
if [ ${#badFiles[@]} -eq 0 ]; then
|
||||
echo 'Congratulations! There is no conflict.'
|
||||
else
|
||||
{
|
||||
echo "There is trace of conflict(s) in the following files :"
|
||||
for f in "${badFiles[@]}"; do
|
||||
echo " - $f"
|
||||
done
|
||||
echo
|
||||
echo 'Please fix the conflict(s) and commit the result.'
|
||||
echo
|
||||
} >&2
|
||||
false
|
||||
fi
|
||||
29
hack/make/validate-gofmt
Executable file
@@ -0,0 +1,29 @@
|
||||
#!/bin/bash
|
||||
|
||||
source "$(dirname "$BASH_SOURCE")/.validate"
|
||||
|
||||
IFS=$'\n'
|
||||
files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) )
|
||||
unset IFS
|
||||
|
||||
badFiles=()
|
||||
for f in "${files[@]}"; do
|
||||
if [ "$(gofmt -s -l < $f)" ]; then
|
||||
badFiles+=( "$f" )
|
||||
fi
|
||||
done
|
||||
|
||||
if [ ${#badFiles[@]} -eq 0 ]; then
|
||||
echo 'Congratulations! All Go source files are properly formatted.'
|
||||
else
|
||||
{
|
||||
echo "These files are not properly gofmt'd:"
|
||||
for f in "${badFiles[@]}"; do
|
||||
echo " - $f"
|
||||
done
|
||||
echo
|
||||
echo 'Please reformat the above files using "gofmt -s -w" and commit the result.'
|
||||
echo
|
||||
} >&2
|
||||
false
|
||||
fi
|
||||
33
hack/make/validate-lint
Executable file
@@ -0,0 +1,33 @@
|
||||
#!/bin/bash
|
||||
|
||||
source "$(dirname "$BASH_SOURCE")/.validate"
|
||||
|
||||
# We will eventually get to the point where packages should be the complete list
|
||||
# of subpackages, vendoring excluded, as given by:
|
||||
#
|
||||
IFS=$'\n'
|
||||
files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/\|^integration' || true) )
|
||||
unset IFS
|
||||
|
||||
errors=()
|
||||
for f in "${files[@]}"; do
|
||||
failedLint=$(golint "$f")
|
||||
if [ "$failedLint" ]; then
|
||||
errors+=( "$failedLint" )
|
||||
fi
|
||||
done
|
||||
|
||||
if [ ${#errors[@]} -eq 0 ]; then
|
||||
echo 'Congratulations! All Go source files have been linted.'
|
||||
else
|
||||
{
|
||||
echo "Errors from golint:"
|
||||
for err in "${errors[@]}"; do
|
||||
echo "$err"
|
||||
done
|
||||
echo
|
||||
echo 'Please fix the above errors. You can test via "golint" and commit the result.'
|
||||
echo
|
||||
} >&2
|
||||
false
|
||||
fi
|
||||
16
hack/make/validate-vet
Executable file
@@ -0,0 +1,16 @@
|
||||
#!/bin/bash
|
||||
|
||||
errors=$(go vet $mod_vendor $(go list $mod_vendor -e ./...))
|
||||
|
||||
if [ -z "$errors" ]; then
|
||||
echo 'Congratulations! All Go source files have been vetted.'
|
||||
else
|
||||
{
|
||||
echo "Errors from go vet:"
|
||||
echo "$errors"
|
||||
echo
|
||||
echo 'Please fix the above errors. You can test via "go vet" and commit the result.'
|
||||
echo
|
||||
} >&2
|
||||
false
|
||||
fi
|
||||
17
hack/travis_osx.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
export GOPATH=$(pwd)/_gopath
|
||||
export PATH=$GOPATH/bin:$PATH
|
||||
|
||||
_containers="${GOPATH}/src/github.com/containers"
|
||||
mkdir -vp ${_containers}
|
||||
ln -vsf $(pwd) ${_containers}/skopeo
|
||||
|
||||
go version
|
||||
GO111MODULE=off go get -u github.com/cpuguy83/go-md2man golang.org/x/lint/golint
|
||||
|
||||
cd ${_containers}/skopeo
|
||||
make validate-local test-unit-local bin/skopeo
|
||||
sudo make install
|
||||
skopeo -v
|
||||
13
hack/tree_status.sh
Executable file
@@ -0,0 +1,13 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
STATUS=$(git status --porcelain)
|
||||
if [[ -z $STATUS ]]
|
||||
then
|
||||
echo "tree is clean"
|
||||
else
|
||||
echo "tree is dirty, please commit all changes and sync the vendor.conf"
|
||||
echo ""
|
||||
echo "$STATUS"
|
||||
exit 1
|
||||
fi
|
||||
@@ -1,24 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
cd "$(dirname "$BASH_SOURCE")/.."
|
||||
rm -rf vendor/
|
||||
source 'hack/.vendor-helpers.sh'
|
||||
|
||||
clone git github.com/codegangsta/cli v1.2.0
|
||||
clone git github.com/Sirupsen/logrus v0.8.7 # logrus is a common dependency among multiple deps
|
||||
clone git github.com/docker/docker master
|
||||
clone git golang.org/x/net master https://github.com/golang/net.git
|
||||
clone git github.com/docker/engine-api master
|
||||
clone git github.com/docker/distribution v2.3.0-rc.1
|
||||
clone git github.com/docker/go-connections master
|
||||
clone git github.com/docker/go-units master
|
||||
clone git github.com/docker/libtrust master
|
||||
clone git github.com/opencontainers/runc master
|
||||
clone git github.com/vbatts/tar-split master
|
||||
clone git github.com/gorilla/mux master
|
||||
clone git github.com/gorilla/context master
|
||||
|
||||
clean
|
||||
|
||||
mv vendor/src/* vendor/
|
||||
303
inspect.go
@@ -1,303 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/codegangsta/cli"
|
||||
"github.com/docker/distribution/digest"
|
||||
distreference "github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api"
|
||||
"github.com/docker/docker/cliconfig"
|
||||
"github.com/docker/docker/image"
|
||||
versionPkg "github.com/docker/docker/pkg/version"
|
||||
"github.com/docker/docker/reference"
|
||||
"github.com/docker/docker/registry"
|
||||
types "github.com/docker/engine-api/types"
|
||||
containerTypes "github.com/docker/engine-api/types/container"
|
||||
registryTypes "github.com/docker/engine-api/types/registry"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// fallbackError wraps an error that can possibly allow fallback to a different
|
||||
// endpoint.
|
||||
type fallbackError struct {
|
||||
// err is the error being wrapped.
|
||||
err error
|
||||
// confirmedV2 is set to true if it was confirmed that the registry
|
||||
// supports the v2 protocol. This is used to limit fallbacks to the v1
|
||||
// protocol.
|
||||
confirmedV2 bool
|
||||
}
|
||||
|
||||
// Error renders the FallbackError as a string.
|
||||
func (f fallbackError) Error() string {
|
||||
return f.err.Error()
|
||||
}
|
||||
|
||||
type manifestFetcher interface {
|
||||
Fetch(ctx context.Context, ref reference.Named) (*imageInspect, error)
|
||||
}
|
||||
|
||||
type imageInspect struct {
|
||||
Tag string
|
||||
Digest string
|
||||
RepoTags []string
|
||||
Comment string
|
||||
Created string
|
||||
ContainerConfig *containerTypes.Config
|
||||
DockerVersion string
|
||||
Author string
|
||||
Config *containerTypes.Config
|
||||
Architecture string
|
||||
Os string
|
||||
}
|
||||
|
||||
func validateName(name string) error {
|
||||
distref, err := distreference.ParseNamed(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hostname, _ := distreference.SplitHostname(distref)
|
||||
if hostname == "" {
|
||||
return fmt.Errorf("Please use a fully qualified repository name")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func inspect(c *cli.Context) (*imageInspect, error) {
|
||||
name := c.Args().First()
|
||||
if err := validateName(name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ref, err := reference.ParseNamed(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
imgInspect, err := getData(c, ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return imgInspect, nil
|
||||
}
|
||||
|
||||
func getData(c *cli.Context, ref reference.Named) (*imageInspect, error) {
|
||||
repoInfo, err := registry.ParseRepositoryInfo(ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
authConfig, err := getAuthConfig(c, repoInfo.Index)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := validateRepoName(repoInfo.Name()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//options := ®istry.Options{}
|
||||
//options.Mirrors = opts.NewListOpts(nil)
|
||||
//options.InsecureRegistries = opts.NewListOpts(nil)
|
||||
//options.InsecureRegistries.Set("0.0.0.0/0")
|
||||
//registryService := registry.NewService(options)
|
||||
registryService := registry.NewService(nil)
|
||||
//// TODO(runcom): hacky, provide a way of passing tls cert (flag?) to be used to lookup
|
||||
//for _, ic := range registryService.Config.IndexConfigs {
|
||||
//ic.Secure = false
|
||||
//}
|
||||
|
||||
endpoints, err := registryService.LookupPullEndpoints(repoInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
ctx = context.Background()
|
||||
lastErr error
|
||||
discardNoSupportErrors bool
|
||||
imgInspect *imageInspect
|
||||
confirmedV2 bool
|
||||
)
|
||||
|
||||
for _, endpoint := range endpoints {
|
||||
// make sure I can reach the registry, same as docker pull does
|
||||
v1endpoint, err := endpoint.ToV1Endpoint(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err := v1endpoint.Ping(); err != nil {
|
||||
if strings.Contains(err.Error(), "timeout") {
|
||||
return nil, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if confirmedV2 && endpoint.Version == registry.APIVersion1 {
|
||||
logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL)
|
||||
continue
|
||||
}
|
||||
logrus.Debugf("Trying to fetch image manifest of %s repository from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version)
|
||||
|
||||
//fetcher, err := newManifestFetcher(endpoint, repoInfo, config)
|
||||
fetcher, err := newManifestFetcher(endpoint, repoInfo, authConfig, registryService)
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
|
||||
if imgInspect, err = fetcher.Fetch(ctx, ref); err != nil {
|
||||
// Was this fetch cancelled? If so, don't try to fall back.
|
||||
fallback := false
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
default:
|
||||
if fallbackErr, ok := err.(fallbackError); ok {
|
||||
fallback = true
|
||||
confirmedV2 = confirmedV2 || fallbackErr.confirmedV2
|
||||
err = fallbackErr.err
|
||||
}
|
||||
}
|
||||
if fallback {
|
||||
if _, ok := err.(registry.ErrNoSupport); !ok {
|
||||
// Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors.
|
||||
discardNoSupportErrors = true
|
||||
// save the current error
|
||||
lastErr = err
|
||||
} else if !discardNoSupportErrors {
|
||||
// Save the ErrNoSupport error, because it's either the first error or all encountered errors
|
||||
// were also ErrNoSupport errors.
|
||||
lastErr = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
logrus.Debugf("Not continuing with error: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return imgInspect, nil
|
||||
}
|
||||
|
||||
if lastErr == nil {
|
||||
lastErr = fmt.Errorf("no endpoints found for %s", ref.String())
|
||||
}
|
||||
|
||||
return nil, lastErr
|
||||
}
|
||||
|
||||
func newManifestFetcher(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, authConfig types.AuthConfig, registryService *registry.Service) (manifestFetcher, error) {
|
||||
switch endpoint.Version {
|
||||
case registry.APIVersion2:
|
||||
return &v2ManifestFetcher{
|
||||
endpoint: endpoint,
|
||||
authConfig: authConfig,
|
||||
service: registryService,
|
||||
repoInfo: repoInfo,
|
||||
}, nil
|
||||
case registry.APIVersion1:
|
||||
return &v1ManifestFetcher{
|
||||
endpoint: endpoint,
|
||||
authConfig: authConfig,
|
||||
service: registryService,
|
||||
repoInfo: repoInfo,
|
||||
}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL)
|
||||
}
|
||||
|
||||
func getAuthConfig(c *cli.Context, index *registryTypes.IndexInfo) (types.AuthConfig, error) {
|
||||
var (
|
||||
username = c.GlobalString("username")
|
||||
password = c.GlobalString("password")
|
||||
cfg = c.GlobalString("docker-cfg")
|
||||
)
|
||||
if _, err := os.Stat(cfg); err != nil {
|
||||
logrus.Infof("Docker cli config file %q not found: %v, falling back to --username and --password if needed", cfg, err)
|
||||
return types.AuthConfig{
|
||||
Username: username,
|
||||
Password: password,
|
||||
Email: "stub@example.com",
|
||||
}, nil
|
||||
}
|
||||
confFile, err := cliconfig.Load(cfg)
|
||||
if err != nil {
|
||||
return types.AuthConfig{}, err
|
||||
}
|
||||
authConfig := registry.ResolveAuthConfig(confFile.AuthConfigs, index)
|
||||
logrus.Debugf("authConfig for %s: %v", index.Name, authConfig)
|
||||
|
||||
return authConfig, nil
|
||||
}
|
||||
|
||||
func validateRepoName(name string) error {
|
||||
if name == "" {
|
||||
return fmt.Errorf("Repository name can't be empty")
|
||||
}
|
||||
if name == api.NoBaseImageSpecifier {
|
||||
return fmt.Errorf("'%s' is a reserved name", api.NoBaseImageSpecifier)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func makeImageInspect(img *image.Image, tag string, dgst digest.Digest, tagList []string) *imageInspect {
|
||||
var digest string
|
||||
if err := dgst.Validate(); err == nil {
|
||||
digest = dgst.String()
|
||||
}
|
||||
return &imageInspect{
|
||||
Tag: tag,
|
||||
Digest: digest,
|
||||
RepoTags: tagList,
|
||||
Comment: img.Comment,
|
||||
Created: img.Created.Format(time.RFC3339Nano),
|
||||
ContainerConfig: &img.ContainerConfig,
|
||||
DockerVersion: img.DockerVersion,
|
||||
Author: img.Author,
|
||||
Config: img.Config,
|
||||
Architecture: img.Architecture,
|
||||
Os: img.OS,
|
||||
}
|
||||
}
|
||||
|
||||
func makeRawConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) (map[string]*json.RawMessage, error) {
|
||||
var dver struct {
|
||||
DockerVersion string `json:"docker_version"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(imageJSON, &dver); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
useFallback := versionPkg.Version(dver.DockerVersion).LessThan("1.8.3")
|
||||
|
||||
if useFallback {
|
||||
var v1Image image.V1Image
|
||||
err := json.Unmarshal(imageJSON, &v1Image)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
imageJSON, err = json.Marshal(v1Image)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var c map[string]*json.RawMessage
|
||||
if err := json.Unmarshal(imageJSON, &c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c["rootfs"] = rawJSON(rootfs)
|
||||
c["history"] = rawJSON(history)
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func rawJSON(value interface{}) *json.RawMessage {
|
||||
jsonval, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return (*json.RawMessage)(&jsonval)
|
||||
}
|
||||
167
inspect_v1.go
@@ -1,167 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/registry/client/transport"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/image/v1"
|
||||
"github.com/docker/docker/reference"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/docker/engine-api/types"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type v1ManifestFetcher struct {
|
||||
endpoint registry.APIEndpoint
|
||||
repoInfo *registry.RepositoryInfo
|
||||
repo distribution.Repository
|
||||
confirmedV2 bool
|
||||
// wrap in a config?
|
||||
authConfig types.AuthConfig
|
||||
service *registry.Service
|
||||
session *registry.Session
|
||||
}
|
||||
|
||||
func (mf *v1ManifestFetcher) Fetch(ctx context.Context, ref reference.Named) (*imageInspect, error) {
|
||||
var (
|
||||
imgInspect *imageInspect
|
||||
)
|
||||
if _, isCanonical := ref.(reference.Canonical); isCanonical {
|
||||
// Allowing fallback, because HTTPS v1 is before HTTP v2
|
||||
return nil, fallbackError{err: registry.ErrNoSupport{errors.New("Cannot pull by digest with v1 registry")}}
|
||||
}
|
||||
tlsConfig, err := mf.service.TLSConfig(mf.repoInfo.Index.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
|
||||
tr := transport.NewTransport(
|
||||
registry.NewTransport(tlsConfig),
|
||||
//registry.DockerHeaders(mf.config.MetaHeaders)...,
|
||||
registry.DockerHeaders(nil)...,
|
||||
)
|
||||
client := registry.HTTPClient(tr)
|
||||
//v1Endpoint, err := mf.endpoint.ToV1Endpoint(mf.config.MetaHeaders)
|
||||
v1Endpoint, err := mf.endpoint.ToV1Endpoint(nil)
|
||||
if err != nil {
|
||||
logrus.Debugf("Could not get v1 endpoint: %v", err)
|
||||
return nil, fallbackError{err: err}
|
||||
}
|
||||
mf.session, err = registry.NewSession(client, &mf.authConfig, v1Endpoint)
|
||||
if err != nil {
|
||||
logrus.Debugf("Fallback from error: %s", err)
|
||||
return nil, fallbackError{err: err}
|
||||
}
|
||||
imgInspect, err = mf.fetchWithSession(ctx, ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return imgInspect, nil
|
||||
}
|
||||
|
||||
func (mf *v1ManifestFetcher) fetchWithSession(ctx context.Context, ref reference.Named) (*imageInspect, error) {
|
||||
repoData, err := mf.session.GetRepositoryData(mf.repoInfo)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "HTTP code: 404") {
|
||||
return nil, fmt.Errorf("Error: image %s not found", mf.repoInfo.RemoteName())
|
||||
}
|
||||
// Unexpected HTTP error
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var tagsList map[string]string
|
||||
tagsList, err = mf.session.GetRemoteTags(repoData.Endpoints, mf.repoInfo)
|
||||
if err != nil {
|
||||
logrus.Errorf("unable to get remote tags: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logrus.Debugf("Retrieving the tag list")
|
||||
tagged, isTagged := ref.(reference.NamedTagged)
|
||||
var tagID, tag string
|
||||
if isTagged {
|
||||
tag = tagged.Tag()
|
||||
tagsList[tagged.Tag()] = tagID
|
||||
} else {
|
||||
ref, err = reference.WithTag(ref, reference.DefaultTag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tagged, _ := ref.(reference.NamedTagged)
|
||||
tag = tagged.Tag()
|
||||
tagsList[tagged.Tag()] = tagID
|
||||
}
|
||||
tagID, err = mf.session.GetRemoteTag(repoData.Endpoints, mf.repoInfo, tag)
|
||||
if err == registry.ErrRepoNotFound {
|
||||
return nil, fmt.Errorf("Tag %s not found in repository %s", tag, mf.repoInfo.FullName())
|
||||
}
|
||||
if err != nil {
|
||||
logrus.Errorf("unable to get remote tags: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tagList := []string{}
|
||||
for tag := range tagsList {
|
||||
tagList = append(tagList, tag)
|
||||
}
|
||||
|
||||
img := repoData.ImgList[tagID]
|
||||
|
||||
var pulledImg *image.Image
|
||||
for _, ep := range mf.repoInfo.Index.Mirrors {
|
||||
if pulledImg, err = mf.pullImageJSON(img.ID, ep, repoData.Tokens); err != nil {
|
||||
// Don't report errors when pulling from mirrors.
|
||||
logrus.Debugf("Error pulling image json of %s:%s, mirror: %s, %s", mf.repoInfo.FullName(), img.Tag, ep, err)
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
if pulledImg == nil {
|
||||
for _, ep := range repoData.Endpoints {
|
||||
if pulledImg, err = mf.pullImageJSON(img.ID, ep, repoData.Tokens); err != nil {
|
||||
// It's not ideal that only the last error is returned, it would be better to concatenate the errors.
|
||||
logrus.Infof("Error pulling image json of %s:%s, endpoint: %s, %v", mf.repoInfo.FullName(), img.Tag, ep, err)
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, mf.repoInfo.FullName(), err)
|
||||
}
|
||||
if pulledImg == nil {
|
||||
return nil, fmt.Errorf("No such image %s:%s", mf.repoInfo.FullName(), tag)
|
||||
}
|
||||
|
||||
return makeImageInspect(pulledImg, tag, "", tagList), nil
|
||||
}
|
||||
|
||||
func (mf *v1ManifestFetcher) pullImageJSON(imgID, endpoint string, token []string) (*image.Image, error) {
|
||||
imgJSON, _, err := mf.session.GetRemoteImageJSON(imgID, endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
h, err := v1.HistoryFromConfig(imgJSON, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
configRaw, err := makeRawConfigFromV1Config(imgJSON, image.NewRootFS(), []image.History{h})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config, err := json.Marshal(configRaw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
img, err := image.NewFromJSON(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return img, nil
|
||||
}
|
||||
inspect_v2.go (473 lines deleted)
@@ -1,473 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest/manifestlist"
|
||||
"github.com/docker/distribution/manifest/schema1"
|
||||
"github.com/docker/distribution/manifest/schema2"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/client"
|
||||
dockerdistribution "github.com/docker/docker/distribution"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/image/v1"
|
||||
"github.com/docker/docker/reference"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/docker/engine-api/types"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type v2ManifestFetcher struct {
|
||||
endpoint registry.APIEndpoint
|
||||
repoInfo *registry.RepositoryInfo
|
||||
repo distribution.Repository
|
||||
confirmedV2 bool
|
||||
// wrap in a config?
|
||||
authConfig types.AuthConfig
|
||||
service *registry.Service
|
||||
}
|
||||
|
||||
func (mf *v2ManifestFetcher) Fetch(ctx context.Context, ref reference.Named) (*imageInspect, error) {
|
||||
var (
|
||||
imgInspect *imageInspect
|
||||
err error
|
||||
)
|
||||
|
||||
//mf.repo, mf.confirmedV2, err = distribution.NewV2Repository(ctx, mf.repoInfo, mf.endpoint, mf.config.MetaHeaders, mf.config.AuthConfig, "pull")
|
||||
mf.repo, mf.confirmedV2, err = dockerdistribution.NewV2Repository(ctx, mf.repoInfo, mf.endpoint, nil, &mf.authConfig, "pull")
|
||||
if err != nil {
|
||||
logrus.Debugf("Error getting v2 registry: %v", err)
|
||||
return nil, fallbackError{err: err, confirmedV2: mf.confirmedV2}
|
||||
}
|
||||
|
||||
imgInspect, err = mf.fetchWithRepository(ctx, ref)
|
||||
if err != nil {
|
||||
if _, ok := err.(fallbackError); ok {
|
||||
return nil, err
|
||||
}
|
||||
if registry.ContinueOnError(err) {
|
||||
err = fallbackError{err: err, confirmedV2: mf.confirmedV2}
|
||||
}
|
||||
}
|
||||
return imgInspect, err
|
||||
}
|
||||
|
||||
func (mf *v2ManifestFetcher) fetchWithRepository(ctx context.Context, ref reference.Named) (*imageInspect, error) {
|
||||
var (
|
||||
manifest distribution.Manifest
|
||||
tagOrDigest string // Used for logging/progress only
|
||||
tagList = []string{}
|
||||
)
|
||||
|
||||
manSvc, err := mf.repo.Manifests(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, isTagged := ref.(reference.NamedTagged); !isTagged {
|
||||
ref, err = reference.WithTag(ref, reference.DefaultTag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
|
||||
// NOTE: not using TagService.Get, since it uses HEAD requests
|
||||
// against the manifests endpoint, which are not supported by
|
||||
// all registry versions.
|
||||
manifest, err = manSvc.Get(ctx, "", client.WithTag(tagged.Tag()))
|
||||
if err != nil {
|
||||
return nil, allowV1Fallback(err)
|
||||
}
|
||||
tagOrDigest = tagged.Tag()
|
||||
} else if digested, isDigested := ref.(reference.Canonical); isDigested {
|
||||
manifest, err = manSvc.Get(ctx, digested.Digest())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tagOrDigest = digested.Digest().String()
|
||||
} else {
|
||||
return nil, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
|
||||
}
|
||||
|
||||
if manifest == nil {
|
||||
return nil, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
|
||||
}
|
||||
|
||||
// If manSvc.Get succeeded, we can be confident that the registry on
|
||||
// the other side speaks the v2 protocol.
|
||||
mf.confirmedV2 = true
|
||||
|
||||
tagList, err = mf.repo.Tags(ctx).All(ctx)
|
||||
if err != nil {
|
||||
// If this repository doesn't exist on V2, we should
|
||||
// permit a fallback to V1.
|
||||
return nil, allowV1Fallback(err)
|
||||
}
|
||||
|
||||
var (
|
||||
image *image.Image
|
||||
manifestDigest digest.Digest
|
||||
)
|
||||
|
||||
switch v := manifest.(type) {
|
||||
case *schema1.SignedManifest:
|
||||
image, manifestDigest, err = mf.pullSchema1(ctx, ref, v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case *schema2.DeserializedManifest:
|
||||
image, manifestDigest, err = mf.pullSchema2(ctx, ref, v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case *manifestlist.DeserializedManifestList:
|
||||
image, manifestDigest, err = mf.pullManifestList(ctx, ref, v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("unsupported manifest format")
|
||||
}
|
||||
|
||||
// TODO(runcom)
|
||||
//var showTags bool
|
||||
//if reference.IsNameOnly(ref) {
|
||||
//showTags = true
|
||||
//logrus.Debug("Using default tag: latest")
|
||||
//ref = reference.WithDefaultTag(ref)
|
||||
//}
|
||||
//_ = showTags
|
||||
return makeImageInspect(image, tagOrDigest, manifestDigest, tagList), nil
|
||||
}
|
||||
|
||||
func (mf *v2ManifestFetcher) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (img *image.Image, manifestDigest digest.Digest, err error) {
|
||||
var verifiedManifest *schema1.Manifest
|
||||
verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
// remove duplicate layers and check parent chain validity
|
||||
err = fixManifestLayers(verifiedManifest)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
// Image history converted to the new format
|
||||
var history []image.History
|
||||
|
||||
// Note that the order of this loop is in the direction of bottom-most
|
||||
// to top-most, so that the downloads slice gets ordered correctly.
|
||||
for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
|
||||
var throwAway struct {
|
||||
ThrowAway bool `json:"throwaway,omitempty"`
|
||||
}
|
||||
if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
history = append(history, h)
|
||||
}
|
||||
|
||||
rootFS := image.NewRootFS()
|
||||
configRaw, err := makeRawConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), rootFS, history)
if err != nil {
return nil, "", err
}

config, err := json.Marshal(configRaw)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
img, err = image.NewFromJSON(config)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)
|
||||
|
||||
return img, manifestDigest, nil
|
||||
}
|
||||
|
||||
func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
|
||||
// If pull by digest, then verify the manifest digest. NOTE: It is
|
||||
// important to do this first, before any other content validation. If the
|
||||
// digest cannot be verified, don't even bother with those other things.
|
||||
if digested, isCanonical := ref.(reference.Canonical); isCanonical {
|
||||
verifier, err := digest.NewDigestVerifier(digested.Digest())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err := verifier.Write(signedManifest.Canonical); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !verifier.Verified() {
|
||||
err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
|
||||
logrus.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
m = &signedManifest.Manifest
|
||||
|
||||
if m.SchemaVersion != 1 {
|
||||
return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String())
|
||||
}
|
||||
if len(m.FSLayers) != len(m.History) {
|
||||
return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String())
|
||||
}
|
||||
if len(m.FSLayers) == 0 {
|
||||
return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func fixManifestLayers(m *schema1.Manifest) error {
|
||||
imgs := make([]*image.V1Image, len(m.FSLayers))
|
||||
for i := range m.FSLayers {
|
||||
img := &image.V1Image{}
|
||||
|
||||
if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
imgs[i] = img
|
||||
if err := v1.ValidateID(img.ID); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
|
||||
// Windows base layer can point to a base layer parent that is not in manifest.
|
||||
return errors.New("Invalid parent ID in the base layer of the image.")
|
||||
}
|
||||
|
||||
// check general duplicates to error instead of a deadlock
|
||||
idmap := make(map[string]struct{})
|
||||
|
||||
var lastID string
|
||||
for _, img := range imgs {
|
||||
// skip IDs that appear after each other, we handle those later
|
||||
if _, exists := idmap[img.ID]; img.ID != lastID && exists {
|
||||
return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
|
||||
}
|
||||
lastID = img.ID
|
||||
idmap[lastID] = struct{}{}
|
||||
}
|
||||
|
||||
// backwards loop so that we keep the remaining indexes after removing items
|
||||
for i := len(imgs) - 2; i >= 0; i-- {
|
||||
if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
|
||||
m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
|
||||
m.History = append(m.History[:i], m.History[i+1:]...)
|
||||
} else if imgs[i].Parent != imgs[i+1].ID {
|
||||
return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mf *v2ManifestFetcher) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (img *image.Image, manifestDigest digest.Digest, err error) {
|
||||
manifestDigest, err = schema2ManifestDigest(ref, mfst)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
target := mfst.Target()
|
||||
|
||||
configChan := make(chan []byte, 1)
|
||||
errChan := make(chan error, 1)
|
||||
var cancel func()
|
||||
ctx, cancel = context.WithCancel(ctx)
|
||||
|
||||
// Pull the image config
|
||||
go func() {
|
||||
configJSON, err := mf.pullSchema2ImageConfig(ctx, target.Digest)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
configChan <- configJSON
|
||||
}()
|
||||
|
||||
var (
|
||||
configJSON []byte // raw serialized image config
|
||||
unmarshalledConfig image.Image // deserialized image config
|
||||
)
|
||||
if runtime.GOOS == "windows" {
|
||||
configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
if unmarshalledConfig.RootFS == nil {
|
||||
return nil, "", errors.New("image config has no rootfs section")
|
||||
}
|
||||
}
|
||||
|
||||
if configJSON == nil {
|
||||
configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
}
|
||||
|
||||
img, err = image.NewFromJSON(configJSON)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
return img, manifestDigest, nil
|
||||
}
|
||||
|
||||
func (mf *v2ManifestFetcher) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
|
||||
blobs := mf.repo.Blobs(ctx)
|
||||
configJSON, err = blobs.Get(ctx, dgst)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Verify image config digest
|
||||
verifier, err := digest.NewDigestVerifier(dgst)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err := verifier.Write(configJSON); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !verifier.Verified() {
|
||||
err := fmt.Errorf("image config verification failed for digest %s", dgst)
|
||||
logrus.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return configJSON, nil
|
||||
}
|
||||
|
||||
func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {
|
||||
select {
|
||||
case configJSON := <-configChan:
|
||||
var unmarshalledConfig image.Image
|
||||
if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil {
|
||||
return nil, image.Image{}, err
|
||||
}
|
||||
return configJSON, unmarshalledConfig, nil
|
||||
case err := <-errChan:
|
||||
return nil, image.Image{}, err
|
||||
// Don't need a case for ctx.Done in the select because cancellation
|
||||
// will trigger an error in p.pullSchema2ImageConfig.
|
||||
}
|
||||
}
|
||||
|
||||
// allowV1Fallback checks if the error is a possible reason to fallback to v1
|
||||
// (even if confirmedV2 has been set already), and if so, wraps the error in
|
||||
// a fallbackError with confirmedV2 set to false. Otherwise, it returns the
|
||||
// error unmodified.
|
||||
func allowV1Fallback(err error) error {
|
||||
switch v := err.(type) {
|
||||
case errcode.Errors:
|
||||
if len(v) != 0 {
|
||||
if v0, ok := v[0].(errcode.Error); ok && registry.ShouldV2Fallback(v0) {
|
||||
return fallbackError{err: err, confirmedV2: false}
|
||||
}
|
||||
}
|
||||
case errcode.Error:
|
||||
if registry.ShouldV2Fallback(v) {
|
||||
return fallbackError{err: err, confirmedV2: false}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// schema2ManifestDigest computes the manifest digest, and, if pulling by
|
||||
// digest, ensures that it matches the requested digest.
|
||||
func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
|
||||
_, canonical, err := mfst.Payload()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// If pull by digest, then verify the manifest digest.
|
||||
if digested, isDigested := ref.(reference.Canonical); isDigested {
|
||||
verifier, err := digest.NewDigestVerifier(digested.Digest())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if _, err := verifier.Write(canonical); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !verifier.Verified() {
|
||||
err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
|
||||
logrus.Error(err)
|
||||
return "", err
|
||||
}
|
||||
return digested.Digest(), nil
|
||||
}
|
||||
|
||||
return digest.FromBytes(canonical), nil
|
||||
}
|
||||
|
||||
// pullManifestList handles "manifest lists" which point to various
|
||||
// platform-specific manifests.
|
||||
func (mf *v2ManifestFetcher) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (img *image.Image, manifestListDigest digest.Digest, err error) {
|
||||
manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
var manifestDigest digest.Digest
|
||||
for _, manifestDescriptor := range mfstList.Manifests {
|
||||
// TODO(aaronl): The manifest list spec supports optional
|
||||
// "features" and "variant" fields. These are not yet used.
|
||||
// Once they are, their values should be interpreted here.
|
||||
if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS {
|
||||
manifestDigest = manifestDescriptor.Digest
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if manifestDigest == "" {
|
||||
return nil, "", errors.New("no supported platform found in manifest list")
|
||||
}
|
||||
|
||||
manSvc, err := mf.repo.Manifests(ctx)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
manifest, err := manSvc.Get(ctx, manifestDigest)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
manifestRef, err := reference.WithDigest(ref, manifestDigest)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
switch v := manifest.(type) {
|
||||
case *schema1.SignedManifest:
|
||||
img, _, err = mf.pullSchema1(ctx, manifestRef, v)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
case *schema2.DeserializedManifest:
|
||||
img, _, err = mf.pullSchema2(ctx, manifestRef, v)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
default:
|
||||
return nil, "", errors.New("unsupported manifest format")
|
||||
}
|
||||
|
||||
return img, manifestListDigest, err
|
||||
}
|
||||
install.md (new file, 261 lines)
@@ -0,0 +1,261 @@

# Installing from packages

## Distribution Packages
`skopeo` may already be packaged in your distribution.

### Fedora

```sh
sudo dnf -y install skopeo
```

### RHEL/CentOS ≥ 8 and CentOS Stream

```sh
sudo dnf -y install skopeo
```

Newer Skopeo releases may be available on the repositories provided by the
Kubic project. Beware, these may not be suitable for production environments.

on CentOS 8:

```sh
sudo dnf -y module disable container-tools
sudo dnf -y install 'dnf-command(copr)'
sudo dnf -y copr enable rhcontainerbot/container-selinux
sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_8/devel:kubic:libcontainers:stable.repo
sudo dnf -y install skopeo
```

on CentOS 8 Stream:

```sh
sudo dnf -y module disable container-tools
sudo dnf -y install 'dnf-command(copr)'
sudo dnf -y copr enable rhcontainerbot/container-selinux
sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_8_Stream/devel:kubic:libcontainers:stable.repo
sudo dnf -y install skopeo
```

### RHEL/CentOS ≤ 7.x

```sh
sudo yum -y install skopeo
```

Newer Skopeo releases may be available on the repositories provided by the
Kubic project. Beware, these may not be suitable for production environments.

```sh
sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_7/devel:kubic:libcontainers:stable.repo
sudo yum -y install skopeo
```

### openSUSE

```sh
sudo zypper install skopeo
```

### Alpine

```sh
sudo apk add skopeo
```

### macOS

```sh
brew install skopeo
```

### Nix / NixOS

```sh
nix-env -i skopeo
```

### Debian

The skopeo package is available in
the [Bullseye (testing) branch](https://packages.debian.org/bullseye/skopeo), which
will be the next stable release (Debian 11), as well as in Debian Unstable/Sid.

```bash
# Debian Testing/Bullseye or Unstable/Sid
sudo apt-get update
sudo apt-get -y install skopeo
```

If you would prefer newer (though not as well-tested) packages,
the [Kubic project](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/skopeo)
provides packages for Debian 10 and newer. The packages in the Kubic project repos are updated
more frequently than those in Debian's official repositories, owing to Debian's release cadence.
The build sources for the Kubic packages can be found [here](https://gitlab.com/rhcontainerbot/skopeo/-/tree/debian/debian).

CAUTION: On Debian 11 and newer, including Debian Testing and Sid, we highly recommend you use Buildah, Podman and Skopeo ONLY from EITHER the Kubic repo
OR the official Debian repos. Mixing and matching may lead to unpredictable situations including installation conflicts.

```bash
# Debian 10
echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/Release.key | sudo apt-key add -
sudo apt-get update
sudo apt-get -y install skopeo

# Debian Testing
echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Testing/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Testing/Release.key | sudo apt-key add -
sudo apt-get update
sudo apt-get -y install skopeo

# Debian Sid/Unstable
echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Unstable/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Unstable/Release.key | sudo apt-key add -
sudo apt-get update
sudo apt-get -y install skopeo
```

### Raspberry Pi OS armhf (ex Raspbian)

The Kubic project provides packages for Raspbian 10.

```bash
# Raspbian 10
echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Raspbian_10/ /' | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Raspbian_10/Release.key | sudo apt-key add -
sudo apt-get update -qq
sudo apt-get -qq -y install skopeo
```

### Raspberry Pi OS arm64 (beta)

Raspberry Pi OS uses the standard Debian repositories,
so it is fully compatible with Debian's arm64 repository.
You can simply follow the [steps for Debian](#debian) to install Skopeo.

### Ubuntu

The skopeo package is available in the official repositories for Ubuntu 20.10
and newer.

```bash
# Ubuntu 20.10 and newer
sudo apt-get -y update
sudo apt-get -y install skopeo
```

If you would prefer newer (though not as well-tested) packages,
the [Kubic project](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/skopeo)
provides packages for active Ubuntu releases 18.04 and newer (it should also work with direct derivatives like Pop!\_OS).
Check out the [Kubic project page](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/skopeo)
for a list of supported Ubuntu versions and architectures.
**NOTE:** The command `sudo apt-get -y upgrade` may be required in some cases
if Skopeo cannot be installed without it.
The build sources for the Kubic packages can be found [here](https://gitlab.com/rhcontainerbot/skopeo/-/tree/debian/debian).

CAUTION: On Ubuntu 20.10 and newer, we highly recommend you use Buildah, Podman and Skopeo ONLY from EITHER the Kubic repo
OR the official Ubuntu repos. Mixing and matching may lead to unpredictable situations including installation conflicts.

```bash
. /etc/os-release
echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/Release.key | sudo apt-key add -
sudo apt-get update
sudo apt-get -y upgrade
sudo apt-get -y install skopeo
```

Otherwise, read on for building and installing it from source:

To build the `skopeo` binary you need at least Go 1.12.

There are two ways to build skopeo: in a container, or locally without a
container. Choose the one that better matches your needs and environment.

## Building from Source

### Building without a container

Building without a container requires a bit more manual work and setup in your
environment, but it is more flexible:

- It should work in more environments (e.g. for native macOS builds)
- It does not require root privileges (after dependencies are installed)
- It is faster and therefore more convenient for developing `skopeo`.

Install the necessary dependencies:

```bash
# Fedora:
sudo dnf install gpgme-devel libassuan-devel btrfs-progs-devel device-mapper-devel
```

```bash
# Ubuntu (`libbtrfs-dev` requires Ubuntu 18.10 and above):
sudo apt install libgpgme-dev libassuan-dev libbtrfs-dev libdevmapper-dev
```

```bash
# macOS:
brew install gpgme
```

```bash
# openSUSE:
sudo zypper install libgpgme-devel device-mapper-devel libbtrfs-devel glib2-devel
```

Make sure to clone this repository in your `GOPATH` - otherwise compilation fails.

```bash
git clone https://github.com/containers/skopeo $GOPATH/src/github.com/containers/skopeo
cd $GOPATH/src/github.com/containers/skopeo && make bin/skopeo
```

By default, the `make` command (`make all`) builds bin/skopeo and the documentation locally.
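A quick way to sanity-check a local build before installing it is to run the binary straight from the build tree. This is only an illustrative sketch: the `--version` check is self-contained, while the `inspect` example targets an arbitrary public image and needs network access.

```bash
# Run the freshly built binary without installing it
./bin/skopeo --version

# Optionally exercise a read-only operation against a public image
./bin/skopeo inspect docker://registry.fedoraproject.org/fedora:latest
```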
### Building documentation

To build the manual you will need go-md2man.

```bash
# Debian:
sudo apt-get install go-md2man
```

```bash
# Fedora:
sudo dnf install go-md2man
```

Then run:

```bash
make docs
```

### Building in a container

Building in a container is simpler, but more restrictive:

- It requires the `podman` command and the ability to run Linux containers.
- The created executable is a Linux executable and depends on dynamic libraries
  that may be available only in a container of a similar Linux distribution.

```bash
make binary
```

### Installation

Finally, after the binary and documentation are built:

```bash
sudo make install
```
integration/blocked_test.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package main

import (
	"github.com/go-check/check"
)

const blockedRegistriesConf = "./fixtures/blocked-registries.conf"
const blockedErrorRegex = `.*registry registry-blocked.com is blocked in .*`

func (s *SkopeoSuite) TestCopyBlockedSource(c *check.C) {
	assertSkopeoFails(c, blockedErrorRegex,
		"--registries-conf", blockedRegistriesConf, "copy",
		"docker://registry-blocked.com/image:test",
		"docker://registry-unblocked.com/image:test")
}

func (s *SkopeoSuite) TestCopyBlockedDestination(c *check.C) {
	assertSkopeoFails(c, blockedErrorRegex,
		"--registries-conf", blockedRegistriesConf, "copy",
		"docker://registry-unblocked.com/image:test",
		"docker://registry-blocked.com/image:test")
}

func (s *SkopeoSuite) TestInspectBlocked(c *check.C) {
	assertSkopeoFails(c, blockedErrorRegex,
		"--registries-conf", blockedRegistriesConf, "inspect",
		"docker://registry-blocked.com/image:test")
}

func (s *SkopeoSuite) TestDeleteBlocked(c *check.C) {
	assertSkopeoFails(c, blockedErrorRegex,
		"--registries-conf", blockedRegistriesConf, "delete",
		"docker://registry-blocked.com/image:test")
}
integration/check_test.go (new file, 113 lines)
@@ -0,0 +1,113 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"testing"
|
||||
|
||||
"github.com/containers/skopeo/version"
|
||||
"github.com/go-check/check"
|
||||
)
|
||||
|
||||
const (
|
||||
privateRegistryURL0 = "127.0.0.1:5000"
|
||||
privateRegistryURL1 = "127.0.0.1:5001"
|
||||
)
|
||||
|
||||
func Test(t *testing.T) {
|
||||
check.TestingT(t)
|
||||
}
|
||||
|
||||
func init() {
|
||||
check.Suite(&SkopeoSuite{})
|
||||
}
|
||||
|
||||
type SkopeoSuite struct {
|
||||
regV2 *testRegistryV2
|
||||
regV2WithAuth *testRegistryV2
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) SetUpSuite(c *check.C) {
|
||||
_, err := exec.LookPath(skopeoBinary)
|
||||
c.Assert(err, check.IsNil)
|
||||
s.regV2 = setupRegistryV2At(c, privateRegistryURL0, false, false)
|
||||
s.regV2WithAuth = setupRegistryV2At(c, privateRegistryURL1, true, false)
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TearDownSuite(c *check.C) {
|
||||
if s.regV2 != nil {
|
||||
s.regV2.Close()
|
||||
}
|
||||
if s.regV2WithAuth != nil {
|
||||
//cmd := exec.Command("docker", "logout", s.regV2WithAuth)
|
||||
//c.Assert(cmd.Run(), check.IsNil)
|
||||
s.regV2WithAuth.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// TODO like dockerCmd but much easier, just out,err
|
||||
//func skopeoCmd()
|
||||
|
||||
func (s *SkopeoSuite) TestVersion(c *check.C) {
|
||||
wanted := fmt.Sprintf(".*%s version %s.*", skopeoBinary, version.Version)
|
||||
assertSkopeoSucceeds(c, wanted, "--version")
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestCanAuthToPrivateRegistryV2WithoutDockerCfg(c *check.C) {
|
||||
wanted := ".*manifest unknown: manifest unknown.*"
|
||||
assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", "--creds="+s.regV2WithAuth.username+":"+s.regV2WithAuth.password, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestNeedAuthToPrivateRegistryV2WithoutDockerCfg(c *check.C) {
|
||||
wanted := ".*unauthorized: authentication required.*"
|
||||
assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestCertDirInsteadOfCertPath(c *check.C) {
|
||||
wanted := ".*unknown flag: --cert-path.*"
|
||||
assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "--cert-path=/")
|
||||
wanted = ".*unauthorized: authentication required.*"
|
||||
assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "--cert-dir=/etc/docker/certs.d/")
|
||||
}
|
||||
|
||||
// TODO(runcom): as soon as we can push to registries ensure you can inspect here
|
||||
// not just get image not found :)
|
||||
func (s *SkopeoSuite) TestNoNeedAuthToPrivateRegistryV2ImageNotFound(c *check.C) {
|
||||
out, err := exec.Command(skopeoBinary, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2.url)).CombinedOutput()
|
||||
c.Assert(err, check.NotNil, check.Commentf(string(out)))
|
||||
wanted := ".*manifest unknown.*"
|
||||
c.Assert(string(out), check.Matches, "(?s)"+wanted) // (?s) : '.' will also match newlines
|
||||
wanted = ".*unauthorized: authentication required.*"
|
||||
c.Assert(string(out), check.Not(check.Matches), "(?s)"+wanted) // (?s) : '.' will also match newlines
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestInspectFailsWhenReferenceIsInvalid(c *check.C) {
|
||||
assertSkopeoFails(c, `.*Invalid image name.*`, "inspect", "unknown")
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestLoginLogout(c *check.C) {
|
||||
wanted := "^Login Succeeded!\n$"
|
||||
assertSkopeoSucceeds(c, wanted, "login", "--tls-verify=false", "--username="+s.regV2WithAuth.username, "--password="+s.regV2WithAuth.password, s.regV2WithAuth.url)
|
||||
// test --get-login returns username
|
||||
wanted = fmt.Sprintf("^%s\n$", s.regV2WithAuth.username)
|
||||
assertSkopeoSucceeds(c, wanted, "login", "--tls-verify=false", "--get-login", s.regV2WithAuth.url)
|
||||
// test logout
|
||||
wanted = fmt.Sprintf("^Removed login credentials for %s\n$", s.regV2WithAuth.url)
|
||||
assertSkopeoSucceeds(c, wanted, "logout", s.regV2WithAuth.url)
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestCopyWithLocalAuth(c *check.C) {
|
||||
wanted := "^Login Succeeded!\n$"
|
||||
assertSkopeoSucceeds(c, wanted, "login", "--tls-verify=false", "--username="+s.regV2WithAuth.username, "--password="+s.regV2WithAuth.password, s.regV2WithAuth.url)
|
||||
// copy to private registry using local authentication
|
||||
imageName := fmt.Sprintf("docker://%s/busybox:mine", s.regV2WithAuth.url)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://docker.io/library/busybox:latest", imageName)
|
||||
// inspect from private registry
|
||||
assertSkopeoSucceeds(c, "", "inspect", "--tls-verify=false", imageName)
|
||||
// logout from the registry
|
||||
wanted = fmt.Sprintf("^Removed login credentials for %s\n$", s.regV2WithAuth.url)
|
||||
assertSkopeoSucceeds(c, wanted, "logout", s.regV2WithAuth.url)
|
||||
// inspect from private registry should fail after logout
|
||||
wanted = ".*unauthorized: authentication required.*"
|
||||
assertSkopeoFails(c, wanted, "inspect", "--tls-verify=false", imageName)
|
||||
}
|
||||
integration/copy_test.go (new file, 1298 lines)
File diff suppressed because it is too large.
integration/decompress-dirs.sh (new executable file, 23 lines)
@@ -0,0 +1,23 @@
#!/bin/bash -e
# Account for differences between dir: images that are solely due to one being
# compressed (fresh from a registry) and the other not being compressed (read
# from storage, which decompressed it and had to reassemble the layer blobs).
for dir in "$@" ; do
    # Updating the manifest's blob digests may change the formatting, so
    # use jq to get them into similar shape.
    jq -M . "${dir}"/manifest.json > "${dir}"/manifest.json.tmp && mv "${dir}"/manifest.json.tmp "${dir}"/manifest.json
    for candidate in "${dir}"/???????????????????????????????????????????????????????????????? ; do
        # If a digest-identified file looks like it was compressed,
        # decompress it, and replace its hash and size in the manifest
        # with the values for their decompressed versions.
        uncompressed=`zcat "${candidate}" 2> /dev/null | sha256sum | cut -c1-64`
        if test $? -eq 0 ; then
            if test "$uncompressed" != e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 ; then
                zcat "${candidate}" > "${dir}"/${uncompressed}
                sed -r -i -e "s#sha256:$(basename ${candidate})#sha256:${uncompressed}#g" "${dir}"/manifest.json
                sed -r -i -e "s#\"size\": $(wc -c < ${candidate}),#\"size\": $(wc -c < ${dir}/${uncompressed}),#g" "${dir}"/manifest.json
                rm -f "${candidate}"
            fi
        fi
    done
done
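A usage sketch (the directory names below are hypothetical): run the script over the `dir:` images you intend to compare, then diff the normalized results:

```bash
# Hypothetical example: normalize two dir: images, then compare them
./decompress-dirs.sh /tmp/img-from-registry /tmp/img-from-storage
diff -urN /tmp/img-from-registry /tmp/img-from-storage
```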
integration/fixtures/blocked-registries.conf (new file, 6 lines)
@@ -0,0 +1,6 @@
[[registry]]
location = "registry-unblocked.com"

[[registry]]
location = "registry-blocked.com"
blocked = true
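For reference, the integration tests above exercise this fixture by passing it to skopeo via `--registries-conf`; a manual equivalent of `TestInspectBlocked` would look roughly like this (a sketch, assuming a local `skopeo` binary and the fixture path used by the tests):

```bash
# skopeo should refuse to contact the blocked registry and fail with an error
# matching: registry registry-blocked.com is blocked in ...
skopeo --registries-conf ./fixtures/blocked-registries.conf \
    inspect docker://registry-blocked.com/image:test
```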
integration/fixtures/image.manifest.json (new file, 26 lines)
@@ -0,0 +1,26 @@
|
||||
{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.docker.container.image.v1+json",
|
||||
"size": 7023,
|
||||
"digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 32654,
|
||||
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 16724,
|
||||
"digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 73109,
|
||||
"digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
|
||||
}
|
||||
]
|
||||
}
|
||||
integration/fixtures/policy.json (new file, 136 lines)
@@ -0,0 +1,136 @@
|
||||
{
|
||||
"default": [
|
||||
{
|
||||
"type": "reject"
|
||||
}
|
||||
],
|
||||
"transports": {
|
||||
"docker": {
|
||||
"localhost:5555": [
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "@keydir@/personal-pubkey.gpg"
|
||||
}
|
||||
],
|
||||
"localhost:5000/myns/extension": [
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "@keydir@/personal-pubkey.gpg"
|
||||
}
|
||||
],
|
||||
"localhost:5006/myns/mirroring-primary": [
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "@keydir@/personal-pubkey.gpg"
|
||||
}
|
||||
],
|
||||
"localhost:5006/myns/mirroring-mirror": [
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "@keydir@/personal-pubkey.gpg"
|
||||
}
|
||||
],
|
||||
"localhost:5006/myns/mirroring-remap": [
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "@keydir@/personal-pubkey.gpg",
|
||||
"signedIdentity": {
|
||||
"type": "remapIdentity",
|
||||
"prefix": "localhost:5006/myns/mirroring-remap",
|
||||
"signedPrefix": "localhost:5006/myns/mirroring-primary"
|
||||
}
|
||||
}
|
||||
],
|
||||
"docker.io/openshift": [
|
||||
{
|
||||
"type": "insecureAcceptAnything"
|
||||
}
|
||||
],
|
||||
"quay.io/openshift": [
|
||||
{
|
||||
"type": "insecureAcceptAnything"
|
||||
}
|
||||
]
|
||||
},
|
||||
"dir": {
|
||||
"/@dirpath@": [
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "@keydir@/official-pubkey.gpg",
|
||||
"signedIdentity": {
|
||||
"type": "exactRepository",
|
||||
"dockerRepository": "localhost:5000/myns/official"
|
||||
}
|
||||
}
|
||||
],
|
||||
"": [
|
||||
{
|
||||
"type": "insecureAcceptAnything"
|
||||
}
|
||||
]
|
||||
},
|
||||
"atomic": {
|
||||
"localhost:5006/myns/personal": [
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "@keydir@/personal-pubkey.gpg"
|
||||
}
|
||||
],
|
||||
"localhost:5006/myns/official": [
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "@keydir@/official-pubkey.gpg"
|
||||
}
|
||||
],
|
||||
"localhost:5006/myns/naming:test1": [
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "@keydir@/official-pubkey.gpg"
|
||||
}
|
||||
],
|
||||
"localhost:5006/myns/naming:naming": [
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "@keydir@/official-pubkey.gpg",
|
||||
"signedIdentity": {
|
||||
"type": "exactRepository",
|
||||
"dockerRepository": "localhost:5006/myns/official"
|
||||
}
|
||||
}
|
||||
],
|
||||
"localhost:5006/myns/cosigned:cosigned": [
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "@keydir@/official-pubkey.gpg",
|
||||
"signedIdentity": {
|
||||
"type": "exactRepository",
|
||||
"dockerRepository": "localhost:5006/myns/official"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "@keydir@/personal-pubkey.gpg"
|
||||
}
|
||||
],
|
||||
"localhost:5000/myns/extension": [
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "@keydir@/personal-pubkey.gpg"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
integration/fixtures/registries.conf (new file, 34 lines)
@@ -0,0 +1,34 @@
[[registry]]
location = "mirror.invalid"
mirror = [
    { location = "mirror-0.invalid" },
    { location = "mirror-1.invalid" },
    { location = "gcr.io/google-containers" },
]

# This entry is currently unused and exists only to ensure
# that the mirror.invalid/busybox is not rewritten twice.
[[registry]]
location = "gcr.io"
prefix = "gcr.io/google-containers"

[[registry]]
location = "invalid.invalid"
mirror = [
    { location = "invalid-mirror-0.invalid" },
    { location = "invalid-mirror-1.invalid" },
]

[[registry]]
location = "gcr.invalid"
prefix = "gcr.invalid/foo/bar"
mirror = [
    { location = "wrong-mirror-0.invalid" },
    { location = "gcr.io/google-containers" },
]

[[registry]]
location = "localhost:5006/myns/mirroring-primary"
mirror = [
    { location = "localhost:5006/myns/mirroring-mirror"},
]
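This fixture is likewise handed to skopeo via `--registries-conf`; with it in effect, a reference under `mirror.invalid` is resolved by trying the configured mirrors in order before falling back to the primary location. A rough manual sketch (the image tag is hypothetical):

```bash
# With this configuration, skopeo tries mirror-0.invalid, mirror-1.invalid and
# gcr.io/google-containers in order before mirror.invalid itself.
skopeo --registries-conf ./fixtures/registries.conf \
    inspect docker://mirror.invalid/busybox:latest
```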
integration/fixtures/registries.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
docker:
    localhost:5555:
        sigstore: file://@sigstore@
    localhost:5555/public:
        sigstore-staging: file://@split-staging@
        sigstore: @split-read@
Binary file not shown.
integration/fixtures/uncompressed-image-s1/manifest.json (new file, 32 lines)
@@ -0,0 +1,32 @@
|
||||
{
|
||||
"schemaVersion": 1,
|
||||
"name": "nonempty",
|
||||
"tag": "nonempty",
|
||||
"architecture": "amd64",
|
||||
"fsLayers": [
|
||||
{
|
||||
"blobSum": "sha256:160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710"
|
||||
}
|
||||
],
|
||||
"history": [
|
||||
{
|
||||
"v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"59c20544b2f4\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"container\":\"59c20544b2f4ad7a8639433bacb1ec215b7dad4a7bf1a83b5ab4679329a46c1d\",\"container_config\":{\"Hostname\":\"59c20544b2f4\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ADD file:14f49faade3db5e596826746d9ed3dfd658490c16c4d61d4886726153ad0591a in /\"],\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"created\":\"2016-09-19T18:23:54.9949213Z\",\"docker_version\":\"1.10.3\",\"id\":\"4c224eac5061bb85f523ca4e3316618fd7921a80fe94286979667b1edb8e1bdd\",\"os\":\"linux\"}"
|
||||
}
|
||||
],
|
||||
"signatures": [
|
||||
{
|
||||
"header": {
|
||||
"jwk": {
|
||||
"crv": "P-256",
|
||||
"kid": "DGWZ:GAUM:WCOC:IMDL:D67M:CEI6:YTVH:M2CM:5HX4:FYDD:77OD:D3F7",
|
||||
"kty": "EC",
|
||||
"x": "eprZNqLO9mHZ4Z4GxefucEgov_1gwEi9lehpJR2suRo",
|
||||
"y": "wIr2ucNg32ROfVCkR_8A5VbBJ-mFmsoIUVa6vt8lIxM"
|
||||
},
|
||||
"alg": "ES256"
|
||||
},
|
||||
"signature": "bvTLWW4YVFRjAanN1EJqwQw60fWSWJPxcGO3UZGFI_gyV6ucGdW4x7jyYL6g06sg925s9cy0wN1lw91CCFv4BA",
|
||||
"protected": "eyJmb3JtYXRMZW5ndGgiOjE0ODcsImZvcm1hdFRhaWwiOiJDbjBLIiwidGltZSI6IjIwMTYtMDktMTlUMTg6NDM6MzNaIn0"
|
||||
}
|
||||
]
|
||||
}
|
||||
Binary file not shown.
@@ -0,0 +1 @@
|
||||
{"architecture":"amd64","config":{"Hostname":"59c20544b2f4","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"59c20544b2f4ad7a8639433bacb1ec215b7dad4a7bf1a83b5ab4679329a46c1d","container_config":{"Hostname":"59c20544b2f4","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) ADD file:14f49faade3db5e596826746d9ed3dfd658490c16c4d61d4886726153ad0591a in /"],"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2016-09-19T18:23:54.9949213Z","docker_version":"1.10.3","history":[{"created":"2016-09-19T18:23:54.9949213Z","created_by":"/bin/sh -c #(nop) ADD file:14f49faade3db5e596826746d9ed3dfd658490c16c4d61d4886726153ad0591a in /"}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710"]}}
|
||||
integration/fixtures/uncompressed-image-s2/manifest.json (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"config": {
|
||||
"mediaType": "application/octet-stream",
|
||||
"size": 1272,
|
||||
"digest": "sha256:86ce150e65c72b30f885c261449d18b7c6832596916e7f654e08377b5a67b4ff"
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 2048,
|
||||
"digest": "sha256:160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710"
|
||||
}
|
||||
]
|
||||
}
|
||||
integration/fixtures_info_test.go (new file, 6 lines)
@@ -0,0 +1,6 @@
package main

const (
	// TestImageManifestDigest is the Docker manifest digest of "fixtures/image.manifest.json"
	TestImageManifestDigest = "sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55"
)
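If the fixture is ever regenerated, the constant can be recomputed with skopeo itself (a sketch; it assumes the `manifest-digest` subcommand is available in the build being tested):

```bash
# Recompute the Docker manifest digest of the fixture
skopeo manifest-digest integration/fixtures/image.manifest.json
```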
integration/openshift.go (new file, 256 lines)
@@ -0,0 +1,256 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/homedir"
|
||||
"github.com/go-check/check"
|
||||
)
|
||||
|
||||
var adminKUBECONFIG = map[string]string{
|
||||
"KUBECONFIG": "openshift.local.config/master/admin.kubeconfig",
|
||||
}
|
||||
|
||||
// openshiftCluster is an OpenShift API master and integrated registry
|
||||
// running on localhost.
|
||||
type openshiftCluster struct {
|
||||
workingDir string
|
||||
dockerDir string
|
||||
processes []*exec.Cmd // Processes to terminate on teardown; append to the end, terminate from end to the start.
|
||||
}
|
||||
|
||||
// startOpenshiftCluster creates a new openshiftCluster.
|
||||
// WARNING: This affects state in users' home directory! Only run
|
||||
// in isolated test environment.
|
||||
func startOpenshiftCluster(c *check.C) *openshiftCluster {
|
||||
cluster := &openshiftCluster{}
|
||||
|
||||
dir, err := ioutil.TempDir("", "openshift-cluster")
|
||||
c.Assert(err, check.IsNil)
|
||||
cluster.workingDir = dir
|
||||
|
||||
cluster.startMaster(c)
|
||||
cluster.prepareRegistryConfig(c)
|
||||
cluster.startRegistry(c)
|
||||
cluster.ocLoginToProject(c)
|
||||
cluster.dockerLogin(c)
|
||||
cluster.relaxImageSignerPermissions(c)
|
||||
|
||||
return cluster
|
||||
}
|
||||
|
||||
// clusterCmd creates an exec.Cmd in cluster.workingDir with current environment modified by environment
|
||||
func (cluster *openshiftCluster) clusterCmd(env map[string]string, name string, args ...string) *exec.Cmd {
|
||||
cmd := exec.Command(name, args...)
|
||||
cmd.Dir = cluster.workingDir
|
||||
cmd.Env = os.Environ()
|
||||
for key, value := range env {
|
||||
cmd.Env = modifyEnviron(cmd.Env, key, value)
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
// startMaster starts the OpenShift master (etcd+API server) and waits for it to be ready, or terminates on failure.
|
||||
func (cluster *openshiftCluster) startMaster(c *check.C) {
|
||||
cmd := cluster.clusterCmd(nil, "openshift", "start", "master")
|
||||
cluster.processes = append(cluster.processes, cmd)
|
||||
stdout, err := cmd.StdoutPipe()
c.Assert(err, check.IsNil)
// Send both streams to the same pipe. This might cause the two streams to be mixed up,
// but logging actually goes only to stderr - this primarily ensures we log any
// unexpected output to stdout.
|
||||
cmd.Stderr = cmd.Stdout
|
||||
err = cmd.Start()
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
portOpen, terminatePortCheck := newPortChecker(c, 8443)
|
||||
defer func() {
|
||||
c.Logf("Terminating port check")
|
||||
terminatePortCheck <- true
|
||||
}()
|
||||
|
||||
terminateLogCheck := make(chan bool, 1)
|
||||
logCheckFound := make(chan bool)
|
||||
go func() {
|
||||
defer func() {
|
||||
c.Logf("Log checker exiting")
|
||||
}()
|
||||
scanner := bufio.NewScanner(stdout)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
c.Logf("Log line: %s", line)
|
||||
if strings.Contains(line, "Started Origin Controllers") {
|
||||
logCheckFound <- true
|
||||
return
|
||||
// FIXME? We stop reading from stdout; could this block the master?
|
||||
}
|
||||
// Note: we can block before we get here.
|
||||
select {
|
||||
case <-terminateLogCheck:
|
||||
c.Logf("terminated")
|
||||
return
|
||||
default:
|
||||
// Do not block here and read the next line.
|
||||
}
|
||||
}
|
||||
logCheckFound <- false
|
||||
}()
|
||||
defer func() {
|
||||
c.Logf("Terminating log check")
|
||||
terminateLogCheck <- true
|
||||
}()
|
||||
|
||||
gotPortCheck := false
|
||||
gotLogCheck := false
|
||||
for !gotPortCheck || !gotLogCheck {
|
||||
c.Logf("Waiting for master")
|
||||
select {
|
||||
case <-portOpen:
|
||||
c.Logf("port check done")
|
||||
gotPortCheck = true
|
||||
case found := <-logCheckFound:
|
||||
c.Logf("log check done, found: %t", found)
|
||||
if !found {
|
||||
c.Fatal("log check done, success message not found")
|
||||
}
|
||||
gotLogCheck = true
|
||||
}
|
||||
}
|
||||
c.Logf("OK, master started!")
|
||||
}
|
||||
|
||||
// prepareRegistryConfig creates a registry service account and a related k8s client configuration in ${cluster.workingDir}/openshift.local.registry.
|
||||
func (cluster *openshiftCluster) prepareRegistryConfig(c *check.C) {
|
||||
// This partially mimics the objects created by (oadm registry), except that we run the
|
||||
// server directly as an ordinary process instead of a pod with an implicitly attached service account.
|
||||
saJSON := `{
|
||||
"apiVersion": "v1",
|
||||
"kind": "ServiceAccount",
|
||||
"metadata": {
|
||||
"name": "registry"
|
||||
}
|
||||
}`
|
||||
cmd := cluster.clusterCmd(adminKUBECONFIG, "oc", "create", "-f", "-")
|
||||
runExecCmdWithInput(c, cmd, saJSON)
|
||||
|
||||
cmd = cluster.clusterCmd(adminKUBECONFIG, "oadm", "policy", "add-cluster-role-to-user", "system:registry", "-z", "registry")
|
||||
out, err := cmd.CombinedOutput()
|
||||
c.Assert(err, check.IsNil, check.Commentf("%s", string(out)))
|
||||
c.Assert(string(out), check.Equals, "cluster role \"system:registry\" added: \"registry\"\n")
|
||||
|
||||
cmd = cluster.clusterCmd(adminKUBECONFIG, "oadm", "create-api-client-config", "--client-dir=openshift.local.registry", "--basename=openshift-registry", "--user=system:serviceaccount:default:registry")
|
||||
out, err = cmd.CombinedOutput()
|
||||
c.Assert(err, check.IsNil, check.Commentf("%s", string(out)))
|
||||
c.Assert(string(out), check.Equals, "")
|
||||
}
|
||||
|
||||
// startRegistry starts the OpenShift registry with configPart on port, waits for it to be ready, and returns the process object, or terminates on failure.
|
||||
func (cluster *openshiftCluster) startRegistryProcess(c *check.C, port int, configPath string) *exec.Cmd {
|
||||
cmd := cluster.clusterCmd(map[string]string{
|
||||
"KUBECONFIG": "openshift.local.registry/openshift-registry.kubeconfig",
|
||||
"DOCKER_REGISTRY_URL": fmt.Sprintf("127.0.0.1:%d", port),
|
||||
}, "dockerregistry", configPath)
|
||||
consumeAndLogOutputs(c, fmt.Sprintf("registry-%d", port), cmd)
|
||||
err := cmd.Start()
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
portOpen, terminatePortCheck := newPortChecker(c, port)
|
||||
defer func() {
|
||||
terminatePortCheck <- true
|
||||
}()
|
||||
c.Logf("Waiting for registry to start")
|
||||
<-portOpen
|
||||
c.Logf("OK, Registry port open")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// startRegistry starts the OpenShift registry and waits for it to be ready, or terminates on failure.
|
||||
func (cluster *openshiftCluster) startRegistry(c *check.C) {
|
||||
// Our “primary” registry
|
||||
cluster.processes = append(cluster.processes, cluster.startRegistryProcess(c, 5000, "/atomic-registry-config.yml"))
|
||||
|
||||
// A registry configured with acceptschema2:false
|
||||
schema1Config := fileFromFixture(c, "/atomic-registry-config.yml", map[string]string{
|
||||
"addr: :5000": "addr: :5005",
|
||||
"rootdirectory: /registry": "rootdirectory: /registry-schema1",
|
||||
// The default configuration currently already contains acceptschema2: false
|
||||
})
|
||||
// Make sure the configuration contains "acceptschema2: false", because eventually it will be enabled upstream and this function will need to be updated.
|
||||
configContents, err := ioutil.ReadFile(schema1Config)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(string(configContents), check.Matches, "(?s).*acceptschema2: false.*")
|
||||
cluster.processes = append(cluster.processes, cluster.startRegistryProcess(c, 5005, schema1Config))
|
||||
|
||||
// A registry configured with acceptschema2:true
|
||||
schema2Config := fileFromFixture(c, "/atomic-registry-config.yml", map[string]string{
|
||||
"addr: :5000": "addr: :5006",
|
||||
"rootdirectory: /registry": "rootdirectory: /registry-schema2",
|
||||
"acceptschema2: false": "acceptschema2: true",
|
||||
})
|
||||
cluster.processes = append(cluster.processes, cluster.startRegistryProcess(c, 5006, schema2Config))
|
||||
}
|
||||
|
||||
// ocLogin runs (oc login) and (oc new-project) on the cluster, or terminates on failure.
|
||||
func (cluster *openshiftCluster) ocLoginToProject(c *check.C) {
|
||||
c.Logf("oc login")
|
||||
cmd := cluster.clusterCmd(nil, "oc", "login", "--certificate-authority=openshift.local.config/master/ca.crt", "-u", "myuser", "-p", "mypw", "https://localhost:8443")
|
||||
out, err := cmd.CombinedOutput()
|
||||
c.Assert(err, check.IsNil, check.Commentf("%s", out))
|
||||
c.Assert(string(out), check.Matches, "(?s).*Login successful.*") // (?s) : '.' will also match newlines
|
||||
|
||||
outString := combinedOutputOfCommand(c, "oc", "new-project", "myns")
|
||||
c.Assert(outString, check.Matches, `(?s).*Now using project "myns".*`) // (?s) : '.' will also match newlines
|
||||
}
|
||||
|
||||
// dockerLogin simulates (docker login) to the cluster, or terminates on failure.
|
||||
// We do not run (docker login) directly, because that requires a running daemon and a docker package.
|
||||
func (cluster *openshiftCluster) dockerLogin(c *check.C) {
|
||||
cluster.dockerDir = filepath.Join(homedir.Get(), ".docker")
|
||||
err := os.Mkdir(cluster.dockerDir, 0700)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
out := combinedOutputOfCommand(c, "oc", "config", "view", "-o", "json", "-o", "jsonpath={.users[*].user.token}")
|
||||
c.Logf("oc config value: %s", out)
|
||||
authValue := base64.StdEncoding.EncodeToString([]byte("unused:" + out))
|
||||
auths := []string{}
|
||||
for _, port := range []int{5000, 5005, 5006} {
|
||||
auths = append(auths, fmt.Sprintf(`"localhost:%d": {
|
||||
"auth": "%s",
|
||||
"email": "unused"
|
||||
}`, port, authValue))
|
||||
}
|
||||
configJSON := `{"auths": {` + strings.Join(auths, ",") + `}}`
|
||||
err = ioutil.WriteFile(filepath.Join(cluster.dockerDir, "config.json"), []byte(configJSON), 0600)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
// relaxImageSignerPermissions opens up the system:image-signer permissions so that
|
||||
// anyone can work with signatures
|
||||
// FIXME: This also allows anyone to DoS anyone else; this design is really not all
|
||||
// that workable, but it is the best we can do for now.
|
||||
func (cluster *openshiftCluster) relaxImageSignerPermissions(c *check.C) {
|
||||
cmd := cluster.clusterCmd(adminKUBECONFIG, "oadm", "policy", "add-cluster-role-to-group", "system:image-signer", "system:authenticated")
|
||||
out, err := cmd.CombinedOutput()
|
||||
c.Assert(err, check.IsNil, check.Commentf("%s", string(out)))
|
||||
c.Assert(string(out), check.Equals, "cluster role \"system:image-signer\" added: \"system:authenticated\"\n")
|
||||
}
|
||||
|
||||
// tearDown stops the cluster services and deletes (only some!) of the state.
|
||||
func (cluster *openshiftCluster) tearDown(c *check.C) {
|
||||
for i := len(cluster.processes) - 1; i >= 0; i-- {
|
||||
cluster.processes[i].Process.Kill()
|
||||
}
|
||||
if cluster.workingDir != "" {
|
||||
os.RemoveAll(cluster.workingDir)
|
||||
}
|
||||
if cluster.dockerDir != "" {
|
||||
os.RemoveAll(cluster.dockerDir)
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.