Mirror of https://github.com/containers/skopeo.git, synced 2026-01-30 13:58:48 +00:00
Compare commits
1099 Commits
254  .cirrus.yml  Normal file
@@ -0,0 +1,254 @@
---

# Main collection of env. vars to set for all tasks and scripts.
env:
####
#### Global variables used for all tasks
####
# Name of the ultimate destination branch for this CI run, PR or post-merge.
DEST_BRANCH: "main"
# Overrides default location (/tmp/cirrus) for repo clone
GOPATH: &gopath "/var/tmp/go"
GOBIN: "${GOPATH}/bin"
GOCACHE: "${GOPATH}/cache"
GOSRC: &gosrc "/var/tmp/go/src/github.com/containers/skopeo"
# Required for consistency with containers/image CI
SKOPEO_PATH: *gosrc
CIRRUS_WORKING_DIR: *gosrc
# The default is 'sh' if unspecified
CIRRUS_SHELL: "/bin/bash"
# Save a little typing (path relative to $CIRRUS_WORKING_DIR)
SCRIPT_BASE: "./contrib/cirrus"

####
#### Cache-image names to test with (double-quotes around names are critical)
####
FEDORA_NAME: "fedora-36"

# Google-cloud VM Images
IMAGE_SUFFIX: "c5495735033528320"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"

# Container FQIN's
FEDORA_CONTAINER_FQIN: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}"

# Built along with the standard PR-based workflow in c/automation_images
SKOPEO_CIDEV_CONTAINER_FQIN: "quay.io/libpod/skopeo_cidev:${IMAGE_SUFFIX}"


# Default timeout for each task
timeout_in: 45m


gcp_credentials: ENCRYPTED[52d9e807b531b37ab14e958cb5a72499460663f04c8d73e22ad608c027a31118420f1c80f0be0882fbdf96f49d8f9ac0]


validate_task:
# The git-validation tool doesn't work well on branch or tag push,
# under Cirrus-CI, due to challenges obtaining the starting commit ID.
# Only do validation for PRs.
only_if: &is_pr $CIRRUS_PR != ''
container:
image: '${SKOPEO_CIDEV_CONTAINER_FQIN}'
cpu: 4
memory: 8
script: |
make validate-local
make vendor && hack/tree_status.sh

doccheck_task:
only_if: *is_pr
depends_on:
- validate
container:
image: "${FEDORA_CONTAINER_FQIN}"
cpu: 4
memory: 8
env:
BUILDTAGS: &withopengpg 'btrfs_noversion libdm_no_deferred_remove containers_image_openpgp'
script: |
# TODO: Can't use 'runner.sh setup' inside container. However,
# removing the pre-installed package is the only necessary step
# at the time of this comment.
dnf erase -y skopeo # Guarantee non-interference
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" build
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" doccheck

osx_task:
# Run for regular PRs and those with [CI:BUILD] but not [CI:DOCS]
only_if: &not_docs_multiarch >-
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
$CIRRUS_CRON != 'multiarch'
depends_on:
- validate
macos_instance:
image: catalina-xcode
setup_script: |
# /usr/local/opt/go@1.18 will be populated by (brew install go@1.18) below
export PATH=$GOPATH/bin:/usr/local/opt/go@1.18/bin:$PATH
brew update
brew install gpgme go@1.18 go-md2man
go install golang.org/x/lint/golint@latest
test_script: |
export PATH=$GOPATH/bin:/usr/local/opt/go@1.18/bin:$PATH
go version
go env
make validate-local test-unit-local bin/skopeo
sudo make install
/usr/local/bin/skopeo -v


cross_task:
alias: cross
only_if: *not_docs_multiarch
depends_on:
- validate
gce_instance: &standardvm
image_project: libpod-218412
zone: "us-central1-f"
cpu: 2
memory: "4Gb"
# Required to be 200gig, do not modify - has i/o performance impact
# according to gcloud CLI tool warning messages.
disk: 200
image_name: ${FEDORA_CACHE_IMAGE_NAME}
env:
BUILDTAGS: *withopengpg
setup_script: >-
"${GOSRC}/${SCRIPT_BASE}/runner.sh" setup
cross_script: >-
"${GOSRC}/${SCRIPT_BASE}/runner.sh" cross


#####
##### NOTE: This task is subtantially duplicated in the containers/image
##### repository's `.cirrus.yml`. Changes made here should be fully merged
##### prior to being manually duplicated and maintained in containers/image.
#####
test_skopeo_task:
alias: test_skopeo
# Don't test for [CI:DOCS], [CI:BUILD], or 'multiarch' cron.
only_if: >-
$CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' &&
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
$CIRRUS_CRON != 'multiarch'
depends_on:
- validate
gce_instance:
image_project: libpod-218412
zone: "us-central1-f"
cpu: 2
memory: "4Gb"
# Required to be 200gig, do not modify - has i/o performance impact
# according to gcloud CLI tool warning messages.
disk: 200
image_name: ${FEDORA_CACHE_IMAGE_NAME}
matrix:
- name: "Skopeo Test" # N/B: Name ref. by hack/get_fqin.sh
env:
BUILDTAGS: 'btrfs_noversion libdm_no_deferred_remove'
- name: "Skopeo Test w/ opengpg"
env:
BUILDTAGS: *withopengpg
setup_script: >-
"${GOSRC}/${SCRIPT_BASE}/runner.sh" setup
vendor_script: >-
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" vendor
build_script: >-
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" build
unit_script: >-
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" unit
integration_script: >-
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" integration
system_script: >
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" system


image_build_task: &image-build
name: "Build multi-arch $CTXDIR"
alias: image_build
# Some of these container images take > 1h to build, limit
# this task to a specific Cirrus-Cron entry with this name.
only_if: $CIRRUS_CRON == 'multiarch'
timeout_in: 120m # emulation is sssllllooooowwww
gce_instance:
<<: *standardvm
image_name: build-push-${IMAGE_SUFFIX}
# More muscle required for parallel multi-arch build
type: "n2-standard-4"
matrix:
- env:
CTXDIR: contrib/skopeoimage/upstream
- env:
CTXDIR: contrib/skopeoimage/testing
- env:
CTXDIR: contrib/skopeoimage/stable
env:
SKOPEO_USERNAME: ENCRYPTED[4195884d23b154553f2ddb26a63fc9fbca50ba77b3e447e4da685d8639ed9bc94b9a86a9c77272c8c80d32ead9ca48da]
SKOPEO_PASSWORD: ENCRYPTED[36e06f9befd17e5da2d60260edb9ef0d40e6312e2bba4cf881d383f1b8b5a18c8e5a553aea2fdebf39cebc6bd3b3f9de]
CONTAINERS_USERNAME: ENCRYPTED[dd722c734641f103b394a3a834d51ca5415347e378637cf98ee1f99e64aad2ec3dbd4664c0d94cb0e06b83d89e9bbe91]
CONTAINERS_PASSWORD: ENCRYPTED[d8b0fac87fe251cedd26c864ba800480f9e0570440b9eb264265b67411b253a626fb69d519e188e6c9a7f525860ddb26]
main_script:
- source /etc/automation_environment
- main.sh $CIRRUS_REPO_CLONE_URL $CTXDIR


test_image_build_task:
<<: *image-build
alias: test_image_build
# Allow this to run inside a PR w/ [CI:BUILD] only.
only_if: $CIRRUS_PR != '' && $CIRRUS_CHANGE_TITLE =~ '.*CI:BUILD.*'
# This takes a LONG time, only run when requested. N/B: Any task
# made to depend on this one will block FOREVER unless triggered.
# DO NOT ADD THIS TASK AS DEPENDENCY FOR `success_task`.
trigger_type: manual
# Overwrite all 'env', don't push anything, just do the build.
env:
DRYRUN: 1


# This task is critical. It updates the "last-used by" timestamp stored
# in metadata for all VM images. This mechanism functions in tandem with
# an out-of-band pruning operation to remove disused VM images.
meta_task:
name: "VM img. keepalive"
alias: meta
container: &smallcontainer
cpu: 2
memory: 2
image: quay.io/libpod/imgts:latest
env:
# Space-separated list of images used by this repository state
IMGNAMES: |
${FEDORA_CACHE_IMAGE_NAME}
build-push-${IMAGE_SUFFIX}
BUILDID: "${CIRRUS_BUILD_ID}"
REPOREF: "${CIRRUS_REPO_NAME}"
GCPJSON: ENCRYPTED[6867b5a83e960e7c159a98fe6c8360064567a071c6f4b5e7d532283ecd870aa65c94ccd74bdaa9bf7aadac9d42e20a67]
GCPNAME: ENCRYPTED[1cf558ae125e3c39ec401e443ad76452b25d790c45eb73d77c83eb059a0f7fd5085ef7e2f7e410b04ea6e83b0aab2eb1]
GCPPROJECT: libpod-218412
clone_script: &noop mkdir -p "$CIRRUS_WORKING_DIR"
script: /usr/local/bin/entrypoint.sh


# Status aggregator for all tests. This task simply ensures a defined
# set of tasks all passed, and allows confirming that based on the status
# of this task.
success_task:
name: "Total Success"
alias: success
# N/B: ALL tasks must be listed here, minus their '_task' suffix.
depends_on:
- validate
- doccheck
- osx
- cross
- test_skopeo
- image_build
- meta
container: *smallcontainer
env:
CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
TEST_ENVIRON: container
clone_script: *noop
script: /bin/true
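The validate and doccheck tasks above invoke only Makefile targets and contrib/cirrus/runner.sh steps, so the quickest pre-push check can usually be reproduced outside Cirrus. A minimal sketch, assuming a local skopeo checkout with a Go toolchain installed; the commands are taken verbatim from validate_task above:

```bash
#!/usr/bin/env bash
# Sketch: run the same commands as the Cirrus validate_task, locally.
# Assumes the working directory is a skopeo checkout with Go installed.
set -euo pipefail

make validate-local      # formatting/lint/vet checks, as in CI
make vendor              # regenerate vendor/ from go.mod
hack/tree_status.sh      # fail if vendoring left the tree dirty
```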
10  .github/dependabot.yml  vendored  Normal file
@@ -0,0 +1,10 @@
version: 2
updates:
- package-ecosystem: gomod
directory: "/"
schedule:
interval: daily
time: "10:00"
timezone: Europe/Berlin
open-pull-requests-limit: 10
105  .github/workflows/check_cirrus_cron.yml  vendored  Normal file
@@ -0,0 +1,105 @@
---

# See also:
# https://github.com/containers/podman/blob/main/.github/workflows/check_cirrus_cron.yml

# Format Ref: https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions

# Required to un-FUBAR default ${{github.workflow}} value
name: check_cirrus_cron

on:
# Note: This only applies to the default branch.
schedule:
# N/B: This should correspond to a period slightly after
# the last job finishes running. See job defs. at:
# https://cirrus-ci.com/settings/repository/6706677464432640
- cron: '59 23 * * 1-5'
# Debug: Allow triggering job manually in github-actions WebUI
workflow_dispatch: {}

permissions:
contents: read

env:
# Debug-mode can reveal secrets, only enable by a secret value.
# Ref: https://help.github.com/en/actions/configuring-and-managing-workflows/managing-a-workflow-run#enabling-step-debug-logging
ACTIONS_STEP_DEBUG: '${{ secrets.ACTIONS_STEP_DEBUG }}'
# CSV listing of e-mail addresses for delivery failure or error notices
RCPTCSV: rh.container.bot@gmail.com,podman-monitor@lists.podman.io
# Filename for table of cron-name to build-id data
# (must be in $GITHUB_WORKSPACE/artifacts/)
NAME_ID_FILEPATH: './artifacts/name_id.txt'

jobs:
cron_failures:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
with:
persist-credentials: false

# Avoid duplicating cron_failures.sh in skopeo repo.
- uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
with:
repository: containers/podman
path: '_podman'
persist-credentials: false

- name: Get failed cron names and Build IDs
id: cron
run: './_podman/.github/actions/${{ github.workflow }}/${{ github.job }}.sh'

- if: steps.cron.outputs.failures > 0
shell: bash
# Must be inline, since context expressions are used.
# Ref: https://docs.github.com/en/free-pro-team@latest/actions/reference/context-and-expression-syntax-for-github-actions
run: |
set -eo pipefail
(
echo "Detected one or more Cirrus-CI cron-triggered jobs have failed recently:"
echo ""

while read -r NAME BID; do
echo "Cron build '$NAME' Failed: https://cirrus-ci.com/build/$BID"
done < "$NAME_ID_FILEPATH"

echo ""
echo "# Source: ${{ github.workflow }} workflow on ${{ github.repository }}."
# Separate content from sendgrid.com automatic footer.
echo ""
echo ""
) > ./artifacts/email_body.txt

- if: steps.cron.outputs.failures > 0
name: Send failure notification e-mail
# Ref: https://github.com/dawidd6/action-send-mail
uses: dawidd6/action-send-mail@a80d851dc950256421f1d1d735a2dc1ef314ac8f # v2.2.2
with:
server_address: ${{secrets.ACTION_MAIL_SERVER}}
server_port: 465
username: ${{secrets.ACTION_MAIL_USERNAME}}
password: ${{secrets.ACTION_MAIL_PASSWORD}}
subject: Cirrus-CI cron build failures on ${{github.repository}}
to: ${{env.RCPTCSV}}
from: ${{secrets.ACTION_MAIL_SENDER}}
body: file://./artifacts/email_body.txt

- if: always()
uses: actions/upload-artifact@82c141cc518b40d92cc801eee768e7aafc9c2fa2 # v2
with:
name: ${{ github.job }}_artifacts
path: artifacts/*

- if: failure()
name: Send error notification e-mail
uses: dawidd6/action-send-mail@a80d851dc950256421f1d1d735a2dc1ef314ac8f # v2.2.2
with:
server_address: ${{secrets.ACTION_MAIL_SERVER}}
server_port: 465
username: ${{secrets.ACTION_MAIL_USERNAME}}
password: ${{secrets.ACTION_MAIL_PASSWORD}}
subject: Github workflow error on ${{github.repository}}
to: ${{env.RCPTCSV}}
from: ${{secrets.ACTION_MAIL_SENDER}}
body: "Job failed: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}"
29  .github/workflows/stale.yml  vendored  Normal file
@@ -0,0 +1,29 @@
name: Mark stale issues and pull requests

# Please refer to https://github.com/actions/stale/blob/master/action.yml
# to see all config knobs of the stale action.

on:
schedule:
- cron: "0 0 * * *"

permissions:
contents: read

jobs:
stale:
permissions:
issues: write # for actions/stale to close stale issues
pull-requests: write # for actions/stale to close stale PRs
runs-on: ubuntu-latest
steps:
- uses: actions/stale@98ed4cb500039dbcccf4bd9bedada4d0187f2757 # v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: 'A friendly reminder that this issue had no activity for 30 days.'
stale-pr-message: 'A friendly reminder that this PR had no activity for 30 days.'
stale-issue-label: 'stale-issue'
stale-pr-label: 'stale-pr'
days-before-stale: 30
days-before-close: 365
remove-stale-when-updated: true
7  .gitignore  vendored
@@ -1,3 +1,10 @@
*.1
/layers-*
/skopeo
result
/completions/
# ignore JetBrains IDEs (GoLand) config folder
.idea

# Ignore the bin directory
bin
25  .travis.yml
@@ -1,25 +0,0 @@
language: go

matrix:
include:
- os: linux
sudo: required
services:
- docker
- os: osx

notifications:
email: false

install:
# NOTE: The (brew update) should not be necessary, and slows things down;
# we include it as a workaround for https://github.com/Homebrew/brew/issues/3299
# ideally Travis should bake the (brew update) into its images
# (https://github.com/travis-ci/travis-ci/issues/8552 ), but that’s only going
# to happen around November 2017 per https://blog.travis-ci.com/2017-10-16-a-new-default-os-x-image-is-coming .
# Remove the (brew update) at that time.
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update && brew install gpgme ; fi

script:
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then hack/travis_osx.sh ; fi
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make vendor && ./hack/tree_status.sh && make check ; fi
3  CODE-OF-CONDUCT.md  Normal file
@@ -0,0 +1,3 @@
## The skopeo Project Community Code of Conduct

The skopeo project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
CONTRIBUTING.md
@@ -117,37 +117,39 @@ commit automatically with `git commit -s`.

### Dependencies management

Make sure [`vndr`](https://github.com/LK4D4/vndr) is installed.
Dependencies are managed via [standard go modules](https://golang.org/ref/mod).

In order to add a new dependency to this project:

- add a new line to `vendor.conf` according to `vndr` rules (e.g. `github.com/pkg/errors master`)
- use `go get -d path/to/dep@version` to add a new line to `go.mod`
- run `make vendor`

In order to update an existing dependency:

- update the relevant dependency line in `vendor.conf`
- use `go get -d -u path/to/dep@version` to update the relevant dependency line in `go.mod`
- run `make vendor`

When new PRs for [containers/image](https://github.com/containers/image) break `skopeo` (i.e. `containers/image` tests fail in `make test-skopeo`):

- create out a new branch in your `skopeo` checkout and switch to it
- update `vendor.conf`. Find out the `containers/image` dependency; update it to vendor from your own branch and your own repository fork (e.g. `github.com/containers/image my-branch https://github.com/runcom/image`)
- find out the version of `containers/image` you want to use and note its commit ID. You might also want to use a fork of `containers/image`, in that case note its repo
- use `go get -d github.com/$REPO/image/v5@$COMMIT_ID` to download the right version. The command will fetch the dependency and then fail because of a conflict in `go.mod`, this is expected. Note the pseudo-version (eg. `v5.13.1-0.20210707123201-50afbf0a326`)
- use `go mod edit -replace=github.com/containers/image/v5=github.com/$REPO/image/v5@$PSEUDO_VERSION` to add a replacement line to `go.mod` (e.g. `replace github.com/containers/image/v5 => github.com/moio/image/v5 v5.13.1-0.20210707123201-50afbf0a3262`)
- run `make vendor`
- make any other necessary changes in the skopeo repo (e.g. add other dependencies now requied by `containers/image`, or update skopeo for changed `containers/image` API)
- make any other necessary changes in the skopeo repo (e.g. add other dependencies now required by `containers/image`, or update skopeo for changed `containers/image` API)
- optionally add new integration tests to the skopeo repo
- submit the resulting branch as a skopeo PR, marked “DO NOT MERGE” (as shown in the sketch after this list)
- iterate until tests pass and the PR is reviewed
- then the original `containers/image` PR can be merged, disregarding its `make test-skopeo` failure
- as soon as possible after that, in the skopeo PR, restore the `containers/image` line in `vendor.conf` to use `containers/image:master`
- as soon as possible after that, in the skopeo PR, use `go mod edit -dropreplace=github.com/containers/image` to remove the `replace` line in `go.mod`
- run `make vendor`
- update the skopeo PR with the result, drop the “DO NOT MERGE” marking
- after tests complete succcesfully again, merge the skopeo PR
- after tests complete successfully again, merge the skopeo PR

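Condensed into commands, the replacement workflow above looks roughly like the following sketch. The fork owner (`moio`) and the pseudo-version are the example values quoted in the list above; substitute your own fork and commit.

```bash
#!/usr/bin/env bash
# Sketch of the go.mod replace/dropreplace steps from the list above.
# REPO and COMMIT_ID are placeholders, taken from the examples in the text.
set -euo pipefail
REPO=moio                 # placeholder: your containers/image fork owner
COMMIT_ID=50afbf0a3262    # placeholder: the commit in that fork you want to test

# Fetch the fork; per the instructions above this is expected to print the
# pseudo-version and then fail on a go.mod conflict, so do not abort on it.
go get -d github.com/$REPO/image/v5@$COMMIT_ID || true

# Point the module at the fork using the pseudo-version noted above, then re-vendor.
go mod edit -replace=github.com/containers/image/v5=github.com/$REPO/image/v5@v5.13.1-0.20210707123201-50afbf0a3262
make vendor

# After the upstream containers/image PR merges, drop the replacement and re-vendor.
go mod edit -dropreplace=github.com/containers/image/v5
make vendor
```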
## Communications

For general questions, or discussions, please use the
IRC group on `irc.freenode.net` called `container-projects`
IRC channel on `irc.libera.chat` called `#container-projects`
that has been setup.

For discussions around issues/bugs and features, you can use the github
51  Dockerfile
@@ -1,51 +0,0 @@
FROM fedora

RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-go-md2man \
# storage deps
btrfs-progs-devel \
device-mapper-devel \
# gpgme bindings deps
libassuan-devel gpgme-devel \
ostree-devel \
gnupg \
# OpenShift deps
which tar wget hostname util-linux bsdtar socat ethtool device-mapper iptables tree findutils nmap-ncat e2fsprogs xfsprogs lsof docker iproute \
bats jq podman \
golint \
&& dnf clean all

# Install two versions of the registry. The first is an older version that
# only supports schema1 manifests. The second is a newer version that supports
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
RUN set -x \
&& REGISTRY_COMMIT_SCHEMA1=ec87e9b6971d831f0eff752ddb54fb64693e51cd \
&& REGISTRY_COMMIT=47a064d4195a9b56133891bbb13620c3ac83a827 \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
&& rm -rf "$GOPATH"

RUN set -x \
&& export GOPATH=$(mktemp -d) \
&& git clone --depth 1 -b v1.5.0-alpha.3 git://github.com/openshift/origin "$GOPATH/src/github.com/openshift/origin" \
# The sed edits out a "go < 1.5" check which works incorrectly with go ≥ 1.10. \
&& sed -i -e 's/\[\[ "\${go_version\[2]}" < "go1.5" ]]/false/' "$GOPATH/src/github.com/openshift/origin/hack/common.sh" \
&& (cd "$GOPATH/src/github.com/openshift/origin" && make clean build && make all WHAT=cmd/dockerregistry) \
&& cp -a "$GOPATH/src/github.com/openshift/origin/_output/local/bin/linux"/*/* /usr/local/bin \
&& cp "$GOPATH/src/github.com/openshift/origin/images/dockerregistry/config.yml" /atomic-registry-config.yml \
&& rm -rf "$GOPATH" \
&& mkdir /registry

ENV GOPATH /usr/share/gocode:/go
ENV PATH $GOPATH/bin:/usr/share/gocode/bin:$PATH
RUN go version
WORKDIR /go/src/github.com/containers/skopeo
COPY . /go/src/github.com/containers/skopeo

#ENTRYPOINT ["hack/dind"]
@@ -1,14 +0,0 @@
FROM ubuntu:18.10

RUN apt-get update && apt-get install -y \
golang \
libbtrfs-dev \
git-core \
libdevmapper-dev \
libgpgme11-dev \
go-md2man \
libglib2.0-dev \
libostree-dev

ENV GOPATH=/
WORKDIR /src/github.com/containers/skopeo
261
Makefile
261
Makefile
@@ -1,47 +1,82 @@
|
||||
.PHONY: all binary build-container docs docs-in-container build-local clean install install-binary install-completions shell test-integration .install.vndr vendor
|
||||
.PHONY: all binary docs docs-in-container build-local clean install install-binary install-completions shell test-integration .install.vndr vendor vendor-in-container
|
||||
|
||||
ifeq ($(shell uname),Darwin)
|
||||
PREFIX ?= ${DESTDIR}/usr/local
|
||||
DARWIN_BUILD_TAG=containers_image_ostree_stub
|
||||
# On macOS, (brew install gpgme) installs it within /usr/local, but /usr/local/include is not in the default search path.
|
||||
# Rather than hard-code this directory, use gpgme-config. Sadly that must be done at the top-level user
|
||||
# instead of locally in the gpgme subpackage, because cgo supports only pkg-config, not general shell scripts,
|
||||
# and gpgme does not install a pkg-config file.
|
||||
# If gpgme is not installed or gpgme-config can’t be found for other reasons, the error is silently ignored
|
||||
# (and the user will probably find out because the cgo compilation will fail).
|
||||
GPGME_ENV := CGO_CFLAGS="$(shell gpgme-config --cflags 2>/dev/null)" CGO_LDFLAGS="$(shell gpgme-config --libs 2>/dev/null)"
|
||||
export GOPROXY=https://proxy.golang.org
|
||||
|
||||
# The following variables very roughly follow https://www.gnu.org/prep/standards/standards.html#Makefile-Conventions .
|
||||
DESTDIR ?=
|
||||
PREFIX ?= /usr/local
|
||||
ifeq ($(shell uname -s),FreeBSD)
|
||||
CONTAINERSCONFDIR ?= /usr/local/etc/containers
|
||||
else
|
||||
PREFIX ?= ${DESTDIR}/usr
|
||||
CONTAINERSCONFDIR ?= /etc/containers
|
||||
endif
|
||||
REGISTRIESDDIR ?= ${CONTAINERSCONFDIR}/registries.d
|
||||
LOOKASIDEDIR ?= /var/lib/containers/sigstore
|
||||
BINDIR ?= ${PREFIX}/bin
|
||||
MANDIR ?= ${PREFIX}/share/man
|
||||
|
||||
BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
|
||||
ZSHINSTALLDIR=${PREFIX}/share/zsh/site-functions
|
||||
FISHINSTALLDIR=${PREFIX}/share/fish/vendor_completions.d
|
||||
|
||||
GO ?= go
|
||||
GOBIN := $(shell $(GO) env GOBIN)
|
||||
GOOS ?= $(shell go env GOOS)
|
||||
GOARCH ?= $(shell go env GOARCH)
|
||||
|
||||
ifeq ($(GOBIN),)
|
||||
GOBIN := $(GOPATH)/bin
|
||||
endif
|
||||
|
||||
INSTALLDIR=${PREFIX}/bin
|
||||
MANINSTALLDIR=${PREFIX}/share/man
|
||||
CONTAINERSSYSCONFIGDIR=${DESTDIR}/etc/containers
|
||||
REGISTRIESDDIR=${CONTAINERSSYSCONFIGDIR}/registries.d
|
||||
SIGSTOREDIR=${DESTDIR}/var/lib/atomic/sigstore
|
||||
BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
|
||||
GO ?= go
|
||||
CONTAINER_RUNTIME := $(shell command -v podman 2> /dev/null || echo docker)
|
||||
GOMD2MAN ?= $(shell command -v go-md2man || echo '$(GOBIN)/go-md2man')
|
||||
# Multiple scripts are sensitive to this value, make sure it's exported/available
|
||||
# N/B: Need to use 'command -v' here for compatibility with MacOS.
|
||||
export CONTAINER_RUNTIME ?= $(if $(shell command -v podman),podman,docker)
|
||||
GOMD2MAN ?= $(if $(shell command -v go-md2man),go-md2man,$(GOBIN)/go-md2man)
|
||||
|
||||
GO_BUILD=$(GO) build
|
||||
# Go module support: set `-mod=vendor` to use the vendored sources
|
||||
# Go module support: set `-mod=vendor` to use the vendored sources.
|
||||
# See also hack/make.sh.
|
||||
ifeq ($(shell go help mod >/dev/null 2>&1 && echo true), true)
|
||||
GO_BUILD=GO111MODULE=on $(GO) build -mod=vendor
|
||||
GO:=GO111MODULE=on $(GO)
|
||||
MOD_VENDOR=-mod=vendor
|
||||
endif
|
||||
|
||||
ifeq ($(DEBUG), 1)
|
||||
override GOGCFLAGS += -N -l
|
||||
endif
|
||||
|
||||
ifeq ($(shell $(GO) env GOOS), linux)
|
||||
GO_DYN_FLAGS="-buildmode=pie"
|
||||
ifeq ($(GOOS), linux)
|
||||
ifneq ($(GOARCH),$(filter $(GOARCH),mips mipsle mips64 mips64le ppc64 riscv64))
|
||||
GO_DYN_FLAGS="-buildmode=pie"
|
||||
endif
|
||||
endif
|
||||
|
||||
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
|
||||
IMAGE := skopeo-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH))
|
||||
# set env like gobuildtag?
|
||||
CONTAINER_CMD := ${CONTAINER_RUNTIME} run --rm -i -e TESTFLAGS="$(TESTFLAGS)" #$(CONTAINER_ENVS)
|
||||
|
||||
# If $TESTFLAGS is set, it is passed as extra arguments to 'go test'.
|
||||
# You can increase test output verbosity with the option '-test.vv'.
|
||||
# You can select certain tests to run, with `-test.run <regex>` for example:
|
||||
#
|
||||
# make test-unit TESTFLAGS='-test.run ^TestManifestDigest$'
|
||||
#
|
||||
# For integration test, we use [gocheck](https://labix.org/gocheck).
|
||||
# You can increase test output verbosity with the option '-check.vv'.
|
||||
# You can limit test selection with `-check.f <regex>`, for example:
|
||||
#
|
||||
# make test-integration TESTFLAGS='-check.f CopySuite.TestCopy.*'
|
||||
export TESTFLAGS ?= -v -check.v -test.timeout=15m
|
||||
|
||||
# This is assumed to be set non-empty when operating inside a CI/automation environment
|
||||
CI ?=
|
||||
|
||||
# This env. var. is interpreted by some tests as a permission to
|
||||
# modify local configuration files and services.
|
||||
export SKOPEO_CONTAINER_TESTS ?= $(if $(CI),1,0)
|
||||
|
||||
# This is a compromise, we either use a container for this or require
|
||||
# the local user to have a compatible python3 development environment.
|
||||
# Define it as a "resolve on use" variable to avoid calling out when possible
|
||||
SKOPEO_CIDEV_CONTAINER_FQIN ?= $(shell hack/get_fqin.sh)
|
||||
CONTAINER_CMD ?= ${CONTAINER_RUNTIME} run --rm -i -e TESTFLAGS="$(TESTFLAGS)" -e CI=$(CI) -e SKOPEO_CONTAINER_TESTS=1
|
||||
# if this session isn't interactive, then we don't want to allocate a
|
||||
# TTY, which would fail, but if it is interactive, we do want to attach
|
||||
# so that the user can send e.g. ^C through.
|
||||
@@ -49,35 +84,44 @@ INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0)
|
||||
ifeq ($(INTERACTIVE), 1)
|
||||
CONTAINER_CMD += -t
|
||||
endif
|
||||
CONTAINER_RUN := $(CONTAINER_CMD) "$(IMAGE)"
|
||||
CONTAINER_GOSRC = /src/github.com/containers/skopeo
|
||||
CONTAINER_RUN ?= $(CONTAINER_CMD) --security-opt label=disable -v $(CURDIR):$(CONTAINER_GOSRC) -w $(CONTAINER_GOSRC) $(SKOPEO_CIDEV_CONTAINER_FQIN)
|
||||
|
||||
GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)
|
||||
|
||||
EXTRA_LDFLAGS ?=
|
||||
SKOPEO_LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} $(EXTRA_LDFLAGS)'
|
||||
|
||||
MANPAGES_MD = $(wildcard docs/*.md)
|
||||
MANPAGES ?= $(MANPAGES_MD:%.md=%)
|
||||
|
||||
BTRFS_BUILD_TAG = $(shell hack/btrfs_tag.sh) $(shell hack/btrfs_installed_tag.sh)
|
||||
LIBDM_BUILD_TAG = $(shell hack/libdm_tag.sh)
|
||||
OSTREE_BUILD_TAG = $(shell hack/ostree_tag.sh)
|
||||
LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG) $(OSTREE_BUILD_TAG) $(DARWIN_BUILD_TAG)
|
||||
LIBSUBID_BUILD_TAG = $(shell hack/libsubid_tag.sh)
|
||||
LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG) $(LIBSUBID_BUILD_TAG)
|
||||
BUILDTAGS += $(LOCAL_BUILD_TAGS)
|
||||
|
||||
ifeq ($(DISABLE_CGO), 1)
|
||||
override BUILDTAGS = containers_image_ostree_stub exclude_graphdriver_devicemapper exclude_graphdriver_btrfs containers_image_openpgp
|
||||
override BUILDTAGS = exclude_graphdriver_devicemapper exclude_graphdriver_btrfs containers_image_openpgp
|
||||
endif
|
||||
|
||||
# make all DEBUG=1
|
||||
# Note: Uses the -N -l go compiler options to disable compiler optimizations
|
||||
# and inlining. Using these build options allows you to subsequently
|
||||
# use source debugging tools like delve.
|
||||
all: binary docs-in-container
|
||||
all: bin/skopeo docs
|
||||
|
||||
codespell:
|
||||
codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L fpr,uint,iff,od,ERRO -w
|
||||
|
||||
help:
|
||||
@echo "Usage: make <target>"
|
||||
@echo
|
||||
@echo "Defaults to building bin/skopeo and docs"
|
||||
@echo
|
||||
@echo " * 'install' - Install binaries and documents to system locations"
|
||||
@echo " * 'binary' - Build skopeo with a container"
|
||||
@echo " * 'binary-local' - Build skopeo locally"
|
||||
@echo " * 'bin/skopeo' - Build skopeo locally"
|
||||
@echo " * 'test-unit' - Execute unit tests"
|
||||
@echo " * 'test-integration' - Execute integration tests"
|
||||
@echo " * 'validate' - Verify whether there is no conflict and all Go source files have been formatted, linted and vetted"
|
||||
@@ -85,98 +129,127 @@ help:
|
||||
@echo " * 'shell' - Run the built image and attach to a shell"
|
||||
@echo " * 'clean' - Clean artifacts"
|
||||
|
||||
# Build a container image (skopeobuild) that has everything we need to build.
|
||||
# Then do the build and the output (skopeo) should appear in current dir
|
||||
# Do the build and the output (skopeo) should appear in current dir
|
||||
binary: cmd/skopeo
|
||||
${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
|
||||
${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
|
||||
skopeobuildimage make binary-local $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
|
||||
|
||||
binary-static: cmd/skopeo
|
||||
${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
|
||||
${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
|
||||
skopeobuildimage make binary-local-static $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
|
||||
$(CONTAINER_RUN) make bin/skopeo $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
|
||||
|
||||
# Build w/o using containers
|
||||
binary-local:
|
||||
$(GPGME_ENV) $(GO_BUILD) ${GO_DYN_FLAGS} -ldflags "-X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo
|
||||
|
||||
binary-local-static:
|
||||
$(GPGME_ENV) $(GO_BUILD) -ldflags "-extldflags \"-static\" -X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo
|
||||
|
||||
build-container:
|
||||
${CONTAINER_RUNTIME} build ${BUILD_ARGS} -t "$(IMAGE)" .
|
||||
.PHONY: bin/skopeo
|
||||
bin/skopeo:
|
||||
$(GO) build $(MOD_VENDOR) ${GO_DYN_FLAGS} ${SKOPEO_LDFLAGS} -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o $@ ./cmd/skopeo
|
||||
bin/skopeo.%:
|
||||
GOOS=$(word 2,$(subst ., ,$@)) GOARCH=$(word 3,$(subst ., ,$@)) $(GO) build $(MOD_VENDOR) ${SKOPEO_LDFLAGS} -tags "containers_image_openpgp $(BUILDTAGS)" -o $@ ./cmd/skopeo
|
||||
local-cross: bin/skopeo.darwin.amd64 bin/skopeo.linux.arm bin/skopeo.linux.arm64 bin/skopeo.windows.386.exe bin/skopeo.windows.amd64.exe
|
||||
|
||||
$(MANPAGES): %: %.md
|
||||
@sed -e 's/\((skopeo.*\.md)\)//' -e 's/\[\(skopeo.*\)\]/\1/' $< | $(GOMD2MAN) -in /dev/stdin -out $@
|
||||
ifneq ($(DISABLE_DOCS), 1)
|
||||
sed -e 's/\((skopeo.*\.md)\)//' -e 's/\[\(skopeo.*\)\]/\1/' $< | $(GOMD2MAN) -in /dev/stdin -out $@
|
||||
endif
|
||||
|
||||
docs: $(MANPAGES)
|
||||
|
||||
docs-in-container:
|
||||
${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
|
||||
${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
|
||||
skopeobuildimage make docs $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
|
||||
${CONTAINER_RUN} $(MAKE) docs $(if $(DEBUG),DEBUG=$(DEBUG))
|
||||
|
||||
.PHONY: completions
|
||||
completions: bin/skopeo
|
||||
install -d -m 755 completions/bash completions/zsh completions/fish completions/powershell
|
||||
./bin/skopeo completion bash >| completions/bash/skopeo
|
||||
./bin/skopeo completion zsh >| completions/zsh/_skopeo
|
||||
./bin/skopeo completion fish >| completions/fish/skopeo.fish
|
||||
./bin/skopeo completion powershell >| completions/powershell/skopeo.ps1
|
||||
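For quick local testing, the generated completion scripts can be sourced directly; a minimal sketch (bash shown, using the paths produced by the target above):

```console
$ make completions
$ source completions/bash/skopeo
```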
|
||||
clean:
|
||||
rm -f skopeo docs/*.1
|
||||
rm -rf bin docs/*.1 completions/
|
||||
|
||||
install: install-binary install-docs install-completions
|
||||
install -d -m 755 ${SIGSTOREDIR}
|
||||
install -d -m 755 ${CONTAINERSSYSCONFIGDIR}
|
||||
install -m 644 default-policy.json ${CONTAINERSSYSCONFIGDIR}/policy.json
|
||||
install -d -m 755 ${REGISTRIESDDIR}
|
||||
install -m 644 default.yaml ${REGISTRIESDDIR}/default.yaml
|
||||
install -d -m 755 ${DESTDIR}${LOOKASIDEDIR}
|
||||
install -d -m 755 ${DESTDIR}${CONTAINERSCONFDIR}
|
||||
install -m 644 default-policy.json ${DESTDIR}${CONTAINERSCONFDIR}/policy.json
|
||||
install -d -m 755 ${DESTDIR}${REGISTRIESDDIR}
|
||||
install -m 644 default.yaml ${DESTDIR}${REGISTRIESDDIR}/default.yaml
|
||||
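A hedged usage sketch for the install target above: `DESTDIR` stages everything into a scratch directory instead of the system locations (the staging path is only an example), and `DISABLE_DOCS=1` skips the man pages, matching the conditional in install-docs below.

```console
$ make install DESTDIR=$PWD/_staging
$ sudo make install DISABLE_DOCS=1
```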
|
||||
install-binary: ./skopeo
|
||||
install -d -m 755 ${INSTALLDIR}
|
||||
install -m 755 skopeo ${INSTALLDIR}/skopeo
|
||||
install-binary: bin/skopeo
|
||||
install -d -m 755 ${DESTDIR}${BINDIR}
|
||||
install -m 755 bin/skopeo ${DESTDIR}${BINDIR}/skopeo
|
||||
|
||||
install-docs: docs
|
||||
install -d -m 755 ${MANINSTALLDIR}/man1
|
||||
install -m 644 docs/*.1 ${MANINSTALLDIR}/man1/
|
||||
ifneq ($(DISABLE_DOCS), 1)
|
||||
install -d -m 755 ${DESTDIR}${MANDIR}/man1
|
||||
install -m 644 docs/*.1 ${DESTDIR}${MANDIR}/man1
|
||||
endif
|
||||
|
||||
install-completions:
|
||||
install -m 755 -d ${BASHINSTALLDIR}
|
||||
install -m 644 completions/bash/skopeo ${BASHINSTALLDIR}/skopeo
|
||||
install-completions: completions
|
||||
install -d -m 755 ${DESTDIR}${BASHINSTALLDIR}
|
||||
install -m 644 completions/bash/skopeo ${DESTDIR}${BASHINSTALLDIR}
|
||||
install -d -m 755 ${DESTDIR}${ZSHINSTALLDIR}
|
||||
install -m 644 completions/zsh/_skopeo ${DESTDIR}${ZSHINSTALLDIR}
|
||||
install -d -m 755 ${DESTDIR}${FISHINSTALLDIR}
|
||||
install -m 644 completions/fish/skopeo.fish ${DESTDIR}${FISHINSTALLDIR}
|
||||
# There is no common location for powershell files so do not install them. Users have to source the file from their powershell profile.
|
||||
|
||||
shell: build-container
|
||||
shell:
|
||||
$(CONTAINER_RUN) bash
|
||||
|
||||
check: validate test-unit test-integration test-system
|
||||
|
||||
# The tests can run out of entropy and block in containers, so replace /dev/random.
|
||||
test-integration: build-container
|
||||
$(CONTAINER_RUN) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-integration'
|
||||
test-integration:
|
||||
$(CONTAINER_RUN) $(MAKE) test-integration-local
|
||||
|
||||
|
||||
# Intended for CI, assumed to be running in quay.io/libpod/skopeo_cidev container.
|
||||
test-integration-local: bin/skopeo
|
||||
hack/make.sh test-integration
|
||||
|
||||
# complicated set of options needed to run podman-in-podman
|
||||
test-system: build-container
|
||||
# TODO: The $(RM) command will likely fail w/o `podman unshare`
|
||||
test-system:
|
||||
DTEMP=$(shell mktemp -d --tmpdir=/var/tmp podman-tmp.XXXXXX); \
|
||||
$(CONTAINER_CMD) --privileged --net=host \
|
||||
-v $$DTEMP:/var/lib/containers:Z \
|
||||
"$(IMAGE)" \
|
||||
bash -c 'BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-system'; \
|
||||
$(CONTAINER_CMD) --privileged \
|
||||
-v $(CURDIR):$(CONTAINER_GOSRC) -w $(CONTAINER_GOSRC) \
|
||||
-v $$DTEMP:/var/lib/containers:Z -v /run/systemd/journal/socket:/run/systemd/journal/socket \
|
||||
"$(SKOPEO_CIDEV_CONTAINER_FQIN)" \
|
||||
$(MAKE) test-system-local; \
|
||||
rc=$$?; \
|
||||
$(RM) -rf $$DTEMP; \
|
||||
-$(RM) -rf $$DTEMP; \
|
||||
exit $$rc
|
||||
|
||||
test-unit: build-container
|
||||
# Just call (make test-unit-local) here instead of worrying about environment differences
|
||||
$(CONTAINER_RUN) make test-unit-local BUILDTAGS='$(BUILDTAGS)'
|
||||
# Intended for CI, assumed to already be running in quay.io/libpod/skopeo_cidev container.
|
||||
test-system-local: bin/skopeo
|
||||
hack/make.sh test-system
|
||||
|
||||
validate: build-container
|
||||
$(CONTAINER_RUN) hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet
|
||||
test-unit:
|
||||
# Just call (make test-unit-local) here instead of worrying about environment differences
|
||||
$(CONTAINER_RUN) $(MAKE) test-unit-local
|
||||
|
||||
validate:
|
||||
$(CONTAINER_RUN) $(MAKE) validate-local
|
||||
|
||||
# This target is only intended for development, e.g. executing it from an IDE. Use (make test) for CI or pre-release testing.
|
||||
test-all-local: validate-local test-unit-local
|
||||
test-all-local: validate-local validate-docs test-unit-local
|
||||
|
||||
.PHONY: validate-local
|
||||
validate-local:
|
||||
hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet
|
||||
BUILDTAGS="${BUILDTAGS}" hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet
|
||||
|
||||
test-unit-local:
|
||||
$(GPGME_ENV) $(GO) test -tags "$(BUILDTAGS)" $$($(GO) list -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/containers/skopeo/\(integration\|vendor/.*\)$$')
|
||||
# This invokes bin/skopeo, hence cannot be run as part of validate-local
|
||||
.PHONY: validate-docs
|
||||
validate-docs:
|
||||
hack/man-page-checker
|
||||
hack/xref-helpmsgs-manpages
|
||||
|
||||
test-unit-local: bin/skopeo
|
||||
$(GO) test $(MOD_VENDOR) -tags "$(BUILDTAGS)" $$($(GO) list $(MOD_VENDOR) -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/containers/skopeo/\(integration\|vendor/.*\)$$')
|
||||
|
||||
vendor:
|
||||
export GO111MODULE=on \
|
||||
$(GO) mod tidy && \
|
||||
$(GO) mod vendor && \
|
||||
$(GO) mod verify
|
||||
$(GO) mod tidy
|
||||
$(GO) mod vendor
|
||||
$(GO) mod verify
|
||||
|
||||
vendor-in-container:
|
||||
podman run --privileged --rm --env HOME=/root -v $(CURDIR):/src -w /src quay.io/libpod/golang:1.16 $(MAKE) vendor
|
||||
|
||||
# CAUTION: This is not a replacement for RPMs provided by your distro.
|
||||
# Only intended to build and test the latest unreleased changes.
|
||||
rpm:
|
||||
rpkg local
|
||||
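A sketch of using this target, assuming rpkg is packaged by your distribution (for example via dnf on Fedora):

```console
$ sudo dnf install rpkg
$ make rpm
```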
|
||||
17
OWNERS
Normal file
@@ -0,0 +1,17 @@
|
||||
approvers:
|
||||
- mtrmac
|
||||
- lsm5
|
||||
- TomSweeneyRedHat
|
||||
- rhatdan
|
||||
- vrothberg
|
||||
reviewers:
|
||||
- ashley-cui
|
||||
- giuseppe
|
||||
- containers/image-maintainers
|
||||
- lsm5
|
||||
- mtrmac
|
||||
- QiWang19
|
||||
- rhatdan
|
||||
- runcom
|
||||
- TomSweeneyRedHat
|
||||
- vrothberg
|
||||
287
README.md
@@ -7,29 +7,34 @@ skopeo [ as well as the original Docker v2 images.
|
||||
|
||||
Skopeo works with API V2 registries such as Docker registries, the Atomic registry, private registries, local directories and local OCI-layout directories. Skopeo does not require a daemon to be running to perform these operations which consist of:
|
||||
Skopeo works with API V2 container image registries such as [docker.io](https://docker.io) and [quay.io](https://quay.io) registries, private registries, local directories and local OCI-layout directories. Skopeo can perform operations which consist of:
|
||||
|
||||
* Copying an image from and to various storage mechanisms.
|
||||
For example you can copy images from one registry to another, without requiring privilege.
|
||||
* Inspecting a remote image showing its properties including its layers, without requiring you to pull the image to the host.
|
||||
* Deleting an image from an image repository.
|
||||
* Syncing an external image repository to an internal registry for air-gapped deployments.
|
||||
* When required by the repository, skopeo can pass the appropriate credentials and certificates for authentication.
|
||||
|
||||
Skopeo operates on the following image and repository types:
|
||||
|
||||
* containers-storage:docker-reference
|
||||
An image located in a local containers/storage image store. Location and image store specified in /etc/containers/storage.conf
|
||||
An image located in a local containers/storage image store. Both the location and image store are specified in /etc/containers/storage.conf. (This is the backend for [Podman](https://podman.io), [CRI-O](https://cri-o.io), [Buildah](https://buildah.io) and friends)
|
||||
|
||||
* dir:path
|
||||
An existing local directory path storing the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.
|
||||
|
||||
* docker://docker-reference
|
||||
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in $HOME/.docker/config.json, which is set e.g. using (docker login).
|
||||
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `skopeo login`.
|
||||
|
||||
* docker-archive:path[:docker-reference]
|
||||
An image is stored in the `docker save` formated file. docker-reference is only used when creating such a file, and it must not contain a digest.
|
||||
An image is stored in a `docker save`-formatted file. docker-reference is only used when creating such a file, and it must not contain a digest.
|
||||
|
||||
* docker-daemon:docker-reference
|
||||
An image docker-reference stored in the docker daemon internal storage. docker-reference must contain either a tag or a digest. Alternatively, when reading images, the format can also be docker-daemon:algo:digest (an image ID).
|
||||
@@ -37,213 +42,173 @@ Skopeo works with API V2 registries such as Docker registries, the Atomic regist
|
||||
* oci:path:tag
|
||||
An image tag in a directory compliant with "Open Container Image Layout Specification" at path.
|
||||
|
||||
* ostree:image[@/absolute/repo/path]
|
||||
An image in local OSTree repository. /absolute/repo/path defaults to /ostree/repo.
|
||||
|
||||
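The transport prefixes above appear directly in image names on the command line. A small sketch (the image names and paths are examples only):

```console
$ skopeo inspect docker://registry.fedoraproject.org/fedora:latest
$ skopeo copy docker://registry.fedoraproject.org/fedora:latest oci:fedora-oci:latest
$ skopeo copy oci:fedora-oci:latest dir:/tmp/fedora-dir
```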
Inspecting a repository
|
||||
-
|
||||
`skopeo` is able to _inspect_ a repository on a Docker registry and fetch images layers.
|
||||
## Inspecting a repository
|
||||
`skopeo` is able to _inspect_ a repository on a container registry and fetch image layers.
|
||||
The _inspect_ command fetches the repository's manifest and it is able to show you a `docker inspect`-like
|
||||
json output about a whole repository or a tag. This tool, in contrast to `docker inspect`, helps you gather useful information about
|
||||
a repository or a tag before pulling it (and thus using disk space). The inspect command can show you which tags are available for the given
|
||||
repository, the labels the image has, the creation date and operating system of the image and more.
|
||||
|
||||
|
||||
Examples:
|
||||
```sh
|
||||
# show properties of fedora:latest
|
||||
$ skopeo inspect docker://docker.io/fedora
|
||||
|
||||
#### Show properties of fedora:latest
|
||||
```console
|
||||
$ skopeo inspect docker://registry.fedoraproject.org/fedora:latest
|
||||
{
|
||||
"Name": "docker.io/library/fedora",
|
||||
"Tag": "latest",
|
||||
"Digest": "sha256:cfd8f071bf8da7a466748f522406f7ae5908d002af1b1a1c0dcf893e183e5b32",
|
||||
"Name": "registry.fedoraproject.org/fedora",
|
||||
"Digest": "sha256:655721ff613ee766a4126cb5e0d5ae81598e1b0c3bcf7017c36c4d72cb092fe9",
|
||||
"RepoTags": [
|
||||
"20",
|
||||
"21",
|
||||
"22",
|
||||
"23",
|
||||
"heisenbug",
|
||||
"latest",
|
||||
"rawhide"
|
||||
"24",
|
||||
"25",
|
||||
"26-modular",
|
||||
...
|
||||
],
|
||||
"Created": "2016-03-04T18:40:02.92155334Z",
|
||||
"DockerVersion": "1.9.1",
|
||||
"Labels": {},
|
||||
"Created": "2020-04-29T06:48:16Z",
|
||||
"DockerVersion": "1.10.1",
|
||||
"Labels": {
|
||||
"license": "MIT",
|
||||
"name": "fedora",
|
||||
"vendor": "Fedora Project",
|
||||
"version": "32"
|
||||
},
|
||||
"Architecture": "amd64",
|
||||
"Os": "linux",
|
||||
"Layers": [
|
||||
"sha256:236608c7b546e2f4e7223526c74fc71470ba06d46ec82aeb402e704bfdee02a2",
|
||||
"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
|
||||
"sha256:3088721d7dbf674fc0be64cd3cf00c25aab921cacf35fa0e7b1578500a3e1653"
|
||||
],
|
||||
"Env": [
|
||||
"DISTTAG=f32container",
|
||||
"FGC=f32",
|
||||
"container=oci"
|
||||
]
|
||||
}
|
||||
|
||||
# show unverifed image's digest
|
||||
$ skopeo inspect docker://docker.io/fedora:rawhide | jq '.Digest'
|
||||
"sha256:905b4846938c8aef94f52f3e41a11398ae5b40f5855fb0e40ed9c157e721d7f8"
|
||||
```
|
||||
|
||||
Copying images
|
||||
-
|
||||
`skopeo` can copy container images between various storage mechanisms, including:
|
||||
* Docker distribution based registries
|
||||
#### Show container configuration from `fedora:latest`
|
||||
|
||||
- The Docker Hub, OpenShift, GCR, Artifactory, Quay ...
|
||||
```console
|
||||
$ skopeo inspect --config docker://registry.fedoraproject.org/fedora:latest | jq
|
||||
{
|
||||
"created": "2020-04-29T06:48:16Z",
|
||||
"architecture": "amd64",
|
||||
"os": "linux",
|
||||
"config": {
|
||||
"Env": [
|
||||
"DISTTAG=f32container",
|
||||
"FGC=f32",
|
||||
"container=oci"
|
||||
],
|
||||
"Cmd": [
|
||||
"/bin/bash"
|
||||
],
|
||||
"Labels": {
|
||||
"license": "MIT",
|
||||
"name": "fedora",
|
||||
"vendor": "Fedora Project",
|
||||
"version": "32"
|
||||
}
|
||||
},
|
||||
"rootfs": {
|
||||
"type": "layers",
|
||||
"diff_ids": [
|
||||
"sha256:a4c0fa2b217d3fd63d51e55a6fd59432e543d499c0df2b1acd48fbe424f2ddd1"
|
||||
]
|
||||
},
|
||||
"history": [
|
||||
{
|
||||
"created": "2020-04-29T06:48:16Z",
|
||||
"comment": "Created by Image Factory"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
#### Show unverified image's digest
|
||||
```console
|
||||
$ skopeo inspect docker://registry.fedoraproject.org/fedora:latest | jq '.Digest'
|
||||
"sha256:655721ff613ee766a4126cb5e0d5ae81598e1b0c3bcf7017c36c4d72cb092fe9"
|
||||
```
|
||||
|
||||
## Copying images
|
||||
|
||||
`skopeo` can copy container images between various storage mechanisms, including:
|
||||
* Container registries
|
||||
|
||||
- The Quay, Docker Hub, OpenShift, GCR, Artifactory ...
|
||||
|
||||
* Container Storage backends
|
||||
|
||||
- Docker daemon storage
|
||||
- [github.com/containers/storage](https://github.com/containers/storage) (Backend for [Podman](https://podman.io), [CRI-O](https://cri-o.io), [Buildah](https://buildah.io) and friends)
|
||||
|
||||
- github.com/containers/storage (Backend for CRI-O, Buildah and friends)
|
||||
- Docker daemon storage
|
||||
|
||||
* Local directories
|
||||
|
||||
* Local OCI-layout directories
|
||||
|
||||
```sh
|
||||
$ skopeo copy docker://busybox:1-glibc atomic:myns/unsigned:streaming
|
||||
$ skopeo copy docker://busybox:latest dir:existingemptydirectory
|
||||
$ skopeo copy docker://busybox:latest oci:busybox_ocilayout:latest
|
||||
```console
|
||||
$ skopeo copy docker://quay.io/buildah/stable docker://registry.internal.company.com/buildah
|
||||
$ skopeo copy oci:busybox_ocilayout:latest dir:existingemptydirectory
|
||||
```
|
||||
|
||||
Deleting images
|
||||
-
|
||||
For example,
|
||||
```sh
|
||||
## Deleting images
|
||||
```console
|
||||
$ skopeo delete docker://localhost:5000/imagename:latest
|
||||
```
|
||||
|
||||
Private registries with authentication
|
||||
-
|
||||
When interacting with private registries, `skopeo` first looks for `--creds` (for `skopeo inspect|delete`) or `--src-creds|--dest-creds` (for `skopeo copy`) flags. If those aren't provided, it looks for the Docker's cli config file (usually located at `$HOME/.docker/config.json`) to get the credentials needed to authenticate. The ultimate fallback, as Docker does, is to provide an empty authentication when interacting with those registries.
|
||||
## Syncing registries
|
||||
```console
|
||||
$ skopeo sync --src docker --dest dir registry.example.com/busybox /media/usb
|
||||
```
|
||||
|
||||
Examples:
|
||||
```sh
|
||||
$ cat /home/runcom/.docker/config.json
|
||||
{
|
||||
"auths": {
|
||||
"myregistrydomain.com:5000": {
|
||||
"auth": "dGVzdHVzZXI6dGVzdHBhc3N3b3Jk",
|
||||
"email": "stuf@ex.cm"
|
||||
}
|
||||
}
|
||||
}
|
||||
## Authenticating to a registry
|
||||
|
||||
# we can see I'm already authenticated via docker login so everything will be fine
|
||||
#### Private registries with authentication
|
||||
skopeo uses credentials from the `--creds` (for `skopeo inspect|delete`) or `--src-creds|--dest-creds` (for `skopeo copy`) flags, if set; otherwise it uses configuration set by `skopeo login`, `podman login`, `buildah login`, or `docker login`.
|
||||
|
||||
```console
|
||||
$ skopeo login --username USER myregistrydomain.com:5000
|
||||
Password:
|
||||
$ skopeo inspect docker://myregistrydomain.com:5000/busybox
|
||||
{"Tag":"latest","Digest":"sha256:473bb2189d7b913ed7187a33d11e743fdc2f88931122a44d91a301b64419f092","RepoTags":["latest"],"Comment":"","Created":"2016-01-15T18:06:41.282540103Z","ContainerConfig":{"Hostname":"aded96b43f48","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"9e77fef7a1c9f989988c06620dabc4020c607885b959a2cbd7c2283c91da3e33","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"DockerVersion":"1.8.3","Author":"","Config":{"Hostname":"aded96b43f48","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"9e77fef7a1c9f989988c06620dabc4020c607885b959a2cbd7c2283c91da3e33","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"Architecture":"amd64","Os":"linux"}
|
||||
$ skopeo logout myregistrydomain.com:5000
|
||||
```
|
||||
|
||||
# let's try now to fake a non existent Docker's config file
|
||||
$ cat /home/runcom/.docker/config.json
|
||||
{}
|
||||
#### Using --creds directly
|
||||
|
||||
$ skopeo inspect docker://myregistrydomain.com:5000/busybox
|
||||
FATA[0000] unauthorized: authentication required
|
||||
|
||||
# passing --creds - we can see that everything goes fine
|
||||
```console
|
||||
$ skopeo inspect --creds=testuser:testpassword docker://myregistrydomain.com:5000/busybox
|
||||
{"Tag":"latest","Digest":"sha256:473bb2189d7b913ed7187a33d11e743fdc2f88931122a44d91a301b64419f092","RepoTags":["latest"],"Comment":"","Created":"2016-01-15T18:06:41.282540103Z","ContainerConfig":{"Hostname":"aded96b43f48","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"9e77fef7a1c9f989988c06620dabc4020c607885b959a2cbd7c2283c91da3e33","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"DockerVersion":"1.8.3","Author":"","Config":{"Hostname":"aded96b43f48","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"9e77fef7a1c9f989988c06620dabc4020c607885b959a2cbd7c2283c91da3e33","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"Architecture":"amd64","Os":"linux"}
|
||||
```
|
||||
|
||||
# skopeo copy example:
|
||||
```console
|
||||
$ skopeo copy --src-creds=testuser:testpassword docker://myregistrydomain.com:5000/private oci:local_oci_image
|
||||
```
|
||||
If your cli config is found but it doesn't contain the necessary credentials for the queried registry
|
||||
you'll get an error. You can fix this by either logging in (via `docker login`) or providing `--creds` or `--src-creds|--dest-creds`.
|
||||
|
||||
|
||||
Obtaining skopeo
|
||||
[Obtaining skopeo](./install.md)
|
||||
-
|
||||
`skopeo` may already be packaged in your distribution, for example on Fedora 23 and later you can install it using
|
||||
```sh
|
||||
$ sudo dnf install skopeo
|
||||
```
|
||||
for openSUSE:
|
||||
```sh
|
||||
$ sudo zypper install skopeo
|
||||
```
|
||||
|
||||
For a detailed description how to install or build skopeo, see
|
||||
[install.md](./install.md).
|
||||
|
||||
Otherwise, read on for building and installing it from source:
|
||||
|
||||
To build the `skopeo` binary you need at least Go 1.9.
|
||||
|
||||
There are two ways to build skopeo: in a container, or locally without a container. Choose the one which better matches your needs and environment.
|
||||
|
||||
### Building without a container
|
||||
Building without a container requires a bit more manual work and setup in your environment, but it is more flexible:
|
||||
- It should work in more environments (e.g. for native macOS builds)
|
||||
- It does not require root privileges (after dependencies are installed)
|
||||
- It is faster, therefore more convenient for developing `skopeo`.
|
||||
|
||||
Install the necessary dependencies:
|
||||
```sh
|
||||
# Fedora:
|
||||
sudo dnf install gpgme-devel libassuan-devel btrfs-progs-devel device-mapper-devel ostree-devel
|
||||
|
||||
# Ubuntu (`libbtrfs-dev` requires Ubuntu 18.10 and above):
|
||||
sudo apt install libgpgme-dev libassuan-dev libbtrfs-dev libdevmapper-dev libostree-dev
|
||||
|
||||
# macOS:
|
||||
brew install gpgme
|
||||
|
||||
# openSUSE
|
||||
sudo zypper install libgpgme-devel device-mapper-devel libbtrfs-devel glib2-devel
|
||||
```
|
||||
|
||||
Make sure to clone this repository in your `GOPATH` - otherwise compilation fails.
|
||||
|
||||
```sh
|
||||
$ git clone https://github.com/containers/skopeo $GOPATH/src/github.com/containers/skopeo
|
||||
$ cd $GOPATH/src/github.com/containers/skopeo && make binary-local
|
||||
```
|
||||
|
||||
### Building in a container
|
||||
Building in a container is simpler, but more restrictive:
|
||||
- It requires the `docker` command and the ability to run Linux containers
|
||||
- The created executable is a Linux executable, and depends on dynamic libraries which may be available only in a container of a similar Linux distribution.
|
||||
|
||||
```sh
|
||||
$ make binary # Or (make all) to also build documentation, see below.
|
||||
```
|
||||
|
||||
To build a pure-Go static binary (disables ostree, devicemapper, btrfs, and gpgme):
|
||||
|
||||
```sh
|
||||
$ make binary-static DISABLE_CGO=1
|
||||
```
|
||||
|
||||
### Building documentation
|
||||
To build the manual you will need go-md2man.
|
||||
```sh
|
||||
Debian$ sudo apt-get install go-md2man
|
||||
Fedora$ sudo dnf install go-md2man
|
||||
```
|
||||
Then
|
||||
```sh
|
||||
$ make docs
|
||||
```
|
||||
|
||||
### Installation
|
||||
Finally, after the binary and documentation are built:
|
||||
```sh
|
||||
$ sudo make install
|
||||
```
|
||||
|
||||
TODO
|
||||
-
|
||||
- list all images on registry?
|
||||
- registry v2 search?
|
||||
- show repo tags via flag or when reference isn't tagged or digested
|
||||
- support rkt/appc image spec
|
||||
|
||||
NOT TODO
|
||||
-
|
||||
- provide a _format_ flag - just use the awesome [jq](https://stedolan.github.io/jq/)
|
||||
|
||||
CONTRIBUTING
|
||||
Contributing
|
||||
-
|
||||
|
||||
Please read the [contribution guide](CONTRIBUTING.md) if you want to collaborate in the project.
|
||||
|
||||
## Commands
|
||||
| Command | Description |
|
||||
| -------------------------------------------------- | ---------------------------------------------------------------------------------------------|
|
||||
| [skopeo-copy(1)](/docs/skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
|
||||
| [skopeo-delete(1)](/docs/skopeo-delete.1.md) | Mark the image-name for later deletion by the registry's garbage collector. |
|
||||
| [skopeo-inspect(1)](/docs/skopeo-inspect.1.md) | Return low-level information about image-name in a registry. |
|
||||
| [skopeo-list-tags(1)](/docs/skopeo-list-tags.1.md) | Return a list of tags for the transport-specific image repository. |
|
||||
| [skopeo-login(1)](/docs/skopeo-login.1.md) | Login to a container registry. |
|
||||
| [skopeo-logout(1)](/docs/skopeo-logout.1.md) | Logout of a container registry. |
|
||||
| [skopeo-manifest-digest(1)](/docs/skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
|
||||
| [skopeo-standalone-sign(1)](/docs/skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
|
||||
| [skopeo-standalone-verify(1)](/docs/skopeo-standalone-verify.1.md)| Verify an image signature. |
|
||||
| [skopeo-sync(1)](/docs/skopeo-sync.1.md) | Synchronize images between container registries and local directories. |
|
||||
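As a quick illustration of one of the commands in the table above (the repository name is only an example):

```console
$ skopeo list-tags docker://registry.fedoraproject.org/fedora
```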
|
||||
License
|
||||
-
|
||||
skopeo is licensed under the Apache License, Version 2.0. See
|
||||
|
||||
3
SECURITY.md
Normal file
@@ -0,0 +1,3 @@
|
||||
## Security and Disclosure Information Policy for the skopeo Project
|
||||
|
||||
The skopeo Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects.
|
||||
@@ -1,34 +0,0 @@
|
||||
// +build !containers_image_openpgp
|
||||
|
||||
package main
|
||||
|
||||
/*
|
||||
This is a pretty horrible workaround. Due to a glibc bug
|
||||
https://bugzilla.redhat.com/show_bug.cgi?id=1326903 , we must ensure we link
|
||||
with -lgpgme before -lpthread. Such arguments come from various packages
|
||||
using cgo, and the ordering of these arguments is, with current (go tool link),
|
||||
dependent on the order in which the cgo-using packages are found in a
|
||||
breadth-first search following dependencies, starting from “main”.
|
||||
|
||||
Thus, if
|
||||
import "net"
|
||||
is processed before
|
||||
import "…/skopeo/signature"
|
||||
it will, in the next level of the BFS, pull in "runtime/cgo" (a dependency of
|
||||
"net") before "mtrmac/gpgme" (a dependency of "…/skopeo/signature"), causing
|
||||
-lpthread (used by "runtime/cgo") to be used before -lgpgme.
|
||||
|
||||
This might be possible to work around by careful import ordering, or by removing
|
||||
a direct dependency on "net", but that would be very fragile.
|
||||
|
||||
So, until the above bug is fixed, add -lgpgme directly in the "main" package
|
||||
to ensure the needed build order.
|
||||
|
||||
Unfortunately, this workaround needs to be applied at the top level of any user
|
||||
of "…/skopeo/signature"; it cannot be added to "…/skopeo/signature" itself,
|
||||
by that time this package is first processed by the linker, a -lpthread may
|
||||
already be queued and it would be too late.
|
||||
*/
|
||||
|
||||
// #cgo LDFLAGS: -lgpgme
|
||||
import "C"
|
||||
16
cmd/skopeo/completions.go
Normal file
@@ -0,0 +1,16 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/containers/image/v5/transports"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// autocompleteSupportedTransports list all supported transports with the colon suffix.
|
||||
func autocompleteSupportedTransports(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
tps := transports.ListNames()
|
||||
suggestions := make([]string, 0, len(tps))
|
||||
for _, tp := range tps {
|
||||
suggestions = append(suggestions, tp+":")
|
||||
}
|
||||
return suggestions, cobra.ShellCompDirectiveNoFileComp
|
||||
}
|
||||
@@ -4,87 +4,122 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/copy"
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/containers/image/manifest"
|
||||
"github.com/containers/image/transports"
|
||||
"github.com/containers/image/transports/alltransports"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/urfave/cli"
|
||||
commonFlag "github.com/containers/common/pkg/flag"
|
||||
"github.com/containers/common/pkg/retry"
|
||||
"github.com/containers/image/v5/copy"
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/pkg/cli"
|
||||
"github.com/containers/image/v5/transports"
|
||||
"github.com/containers/image/v5/transports/alltransports"
|
||||
encconfig "github.com/containers/ocicrypt/config"
|
||||
enchelpers "github.com/containers/ocicrypt/helpers"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type copyOptions struct {
|
||||
global *globalOptions
|
||||
srcImage *imageOptions
|
||||
destImage *imageDestOptions
|
||||
additionalTags cli.StringSlice // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
|
||||
removeSignatures bool // Do not copy signatures from the source image
|
||||
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
|
||||
format optionalString // Force conversion of the image to a specified format
|
||||
quiet bool // Suppress output information when copying images
|
||||
|
||||
global *globalOptions
|
||||
deprecatedTLSVerify *deprecatedTLSVerifyOption
|
||||
srcImage *imageOptions
|
||||
destImage *imageDestOptions
|
||||
retryOpts *retry.Options
|
||||
additionalTags []string // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
|
||||
removeSignatures bool // Do not copy signatures from the source image
|
||||
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
|
||||
signBySigstorePrivateKey string // Sign the image using a sigstore private key
|
||||
signPassphraseFile string // Path pointing to a passphrase file when signing (for either signature format, but only one of them)
|
||||
signIdentity string // Identity of the signed image, must be a fully specified docker reference
|
||||
digestFile string // Write digest to this file
|
||||
format commonFlag.OptionalString // Force conversion of the image to a specified format
|
||||
quiet bool // Suppress output information when copying images
|
||||
all bool // Copy all of the images if the source is a list
|
||||
multiArch commonFlag.OptionalString // How to handle multi architecture images
|
||||
preserveDigests bool // Preserve digests during copy
|
||||
encryptLayer []int // The list of layers to encrypt
|
||||
encryptionKeys []string // Keys needed to encrypt the image
|
||||
decryptionKeys []string // Keys needed to decrypt the image
|
||||
}
|
||||
|
||||
func copyCmd(global *globalOptions) cli.Command {
|
||||
func copyCmd(global *globalOptions) *cobra.Command {
|
||||
sharedFlags, sharedOpts := sharedImageFlags()
|
||||
srcFlags, srcOpts := imageFlags(global, sharedOpts, "src-", "screds")
|
||||
destFlags, destOpts := imageDestFlags(global, sharedOpts, "dest-", "dcreds")
|
||||
deprecatedTLSVerifyFlags, deprecatedTLSVerifyOpt := deprecatedTLSVerifyFlags()
|
||||
srcFlags, srcOpts := imageFlags(global, sharedOpts, deprecatedTLSVerifyOpt, "src-", "screds")
|
||||
destFlags, destOpts := imageDestFlags(global, sharedOpts, deprecatedTLSVerifyOpt, "dest-", "dcreds")
|
||||
retryFlags, retryOpts := retryFlags()
|
||||
opts := copyOptions{global: global,
|
||||
srcImage: srcOpts,
|
||||
destImage: destOpts,
|
||||
deprecatedTLSVerify: deprecatedTLSVerifyOpt,
|
||||
srcImage: srcOpts,
|
||||
destImage: destOpts,
|
||||
retryOpts: retryOpts,
|
||||
}
|
||||
cmd := &cobra.Command{
|
||||
Use: "copy [command options] SOURCE-IMAGE DESTINATION-IMAGE",
|
||||
Short: "Copy an IMAGE-NAME from one location to another",
|
||||
Long: fmt.Sprintf(`Container "IMAGE-NAME" uses a "transport":"details" format.
|
||||
|
||||
return cli.Command{
|
||||
Name: "copy",
|
||||
Usage: "Copy an IMAGE-NAME from one location to another",
|
||||
Description: fmt.Sprintf(`
|
||||
Supported transports:
|
||||
%s
|
||||
|
||||
Container "IMAGE-NAME" uses a "transport":"details" format.
|
||||
See skopeo(1) section "IMAGE NAMES" for the expected format
|
||||
`, strings.Join(transports.ListNames(), ", ")),
|
||||
RunE: commandAction(opts.run),
|
||||
Example: `skopeo copy docker://quay.io/skopeo/stable:latest docker://registry.example.com/skopeo:latest`,
|
||||
ValidArgsFunction: autocompleteSupportedTransports,
|
||||
}
|
||||
adjustUsage(cmd)
|
||||
flags := cmd.Flags()
|
||||
flags.AddFlagSet(&sharedFlags)
|
||||
flags.AddFlagSet(&deprecatedTLSVerifyFlags)
|
||||
flags.AddFlagSet(&srcFlags)
|
||||
flags.AddFlagSet(&destFlags)
|
||||
flags.AddFlagSet(&retryFlags)
|
||||
flags.StringSliceVar(&opts.additionalTags, "additional-tag", []string{}, "additional tags (supports docker-archive)")
|
||||
flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress output information when copying images")
|
||||
flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
|
||||
flags.Var(commonFlag.NewOptionalStringValue(&opts.multiArch), "multi-arch", `How to handle multi-architecture images (system, all, or index-only)`)
|
||||
flags.BoolVar(&opts.preserveDigests, "preserve-digests", false, "Preserve digests of images and lists")
|
||||
flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE-IMAGE")
|
||||
flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
|
||||
flags.StringVar(&opts.signBySigstorePrivateKey, "sign-by-sigstore-private-key", "", "Sign the image using a sigstore private key at `PATH`")
|
||||
flags.StringVar(&opts.signPassphraseFile, "sign-passphrase-file", "", "Read a passphrase for signing an image from `PATH`")
|
||||
flags.StringVar(&opts.signIdentity, "sign-identity", "", "Identity of signed image, must be a fully specified docker reference. Defaults to the target docker reference.")
|
||||
flags.StringVar(&opts.digestFile, "digestfile", "", "Write the digest of the pushed image to the specified file")
|
||||
flags.VarP(commonFlag.NewOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use in the destination (default is manifest type of source, with fallbacks)`)
|
||||
flags.StringSliceVar(&opts.encryptionKeys, "encryption-key", []string{}, "*Experimental* key with the encryption protocol to use needed to encrypt the image (e.g. jwe:/path/to/key.pem)")
|
||||
flags.IntSliceVar(&opts.encryptLayer, "encrypt-layer", []int{}, "*Experimental* the 0-indexed layer indices, with support for negative indexing (e.g. 0 is the first layer, -1 is the last layer)")
|
||||
flags.StringSliceVar(&opts.decryptionKeys, "decryption-key", []string{}, "*Experimental* key needed to decrypt the image")
|
||||
return cmd
|
||||
}
|
||||
|
||||
Supported transports:
|
||||
%s
|
||||
|
||||
See skopeo(1) section "IMAGE NAMES" for the expected format
|
||||
`, strings.Join(transports.ListNames(), ", ")),
|
||||
ArgsUsage: "SOURCE-IMAGE DESTINATION-IMAGE",
|
||||
Action: commandAction(opts.run),
|
||||
// FIXME: Do we need to namespace the GPG aspect?
|
||||
Flags: append(append(append([]cli.Flag{
|
||||
cli.StringSliceFlag{
|
||||
Name: "additional-tag",
|
||||
Usage: "additional tags (supports docker-archive)",
|
||||
Value: &opts.additionalTags, // Surprisingly StringSliceFlag does not support Destination:, but modifies Value: in place.
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "quiet, q",
|
||||
Usage: "Suppress output information when copying images",
|
||||
Destination: &opts.quiet,
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "remove-signatures",
|
||||
Usage: "Do not copy signatures from SOURCE-IMAGE",
|
||||
Destination: &opts.removeSignatures,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "sign-by",
|
||||
Usage: "Sign the image using a GPG key with the specified `FINGERPRINT`",
|
||||
Destination: &opts.signByFingerprint,
|
||||
},
|
||||
cli.GenericFlag{
|
||||
Name: "format, f",
|
||||
Usage: "`MANIFEST TYPE` (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)",
|
||||
Value: newOptionalStringValue(&opts.format),
|
||||
},
|
||||
}, sharedFlags...), srcFlags...), destFlags...),
|
||||
// parseMultiArch parses the list processing selection
|
||||
// It returns the copy.ImageListSelection to use with image.Copy option
|
||||
func parseMultiArch(multiArch string) (copy.ImageListSelection, error) {
|
||||
switch multiArch {
|
||||
case "system":
|
||||
return copy.CopySystemImage, nil
|
||||
case "all":
|
||||
return copy.CopyAllImages, nil
|
||||
// There is no CopyNoImages value in copy.ImageListSelection, but because we
|
||||
// don't provide an option to select a set of images to copy, we can use
|
||||
// CopySpecificImages.
|
||||
case "index-only":
|
||||
return copy.CopySpecificImages, nil
|
||||
// We don't expose CopySpecificImages other than index-only above, because
|
||||
// we currently don't provide an option to choose the images to copy. That
|
||||
// could be added in the future.
|
||||
default:
|
||||
return copy.CopySystemImage, fmt.Errorf("unknown multi-arch option %q. Choose one of the supported options: 'system', 'all', or 'index-only'", multiArch)
|
||||
}
|
||||
}
|
||||
|
||||
func (opts *copyOptions) run(args []string, stdout io.Writer) error {
|
||||
func (opts *copyOptions) run(args []string, stdout io.Writer) (retErr error) {
|
||||
if len(args) != 2 {
|
||||
return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
|
||||
}
|
||||
opts.deprecatedTLSVerify.warnIfUsed([]string{"--src-tls-verify", "--dest-tls-verify"})
|
||||
imageNames := args
|
||||
|
||||
if err := reexecIfNecessaryForImages(imageNames...); err != nil {
|
||||
@@ -95,7 +130,11 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error loading trust policy: %v", err)
|
||||
}
|
||||
defer policyContext.Destroy()
|
||||
defer func() {
|
||||
if err := policyContext.Destroy(); err != nil {
|
||||
retErr = noteCloseFailure(retErr, "tearing down policy context", err)
|
||||
}
|
||||
}()
|
||||
|
||||
srcRef, err := alltransports.ParseImageName(imageNames[0])
|
||||
if err != nil {
|
||||
@@ -116,16 +155,10 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
|
||||
}
|
||||
|
||||
var manifestType string
|
||||
if opts.format.present {
|
||||
switch opts.format.value {
|
||||
case "oci":
|
||||
manifestType = imgspecv1.MediaTypeImageManifest
|
||||
case "v2s1":
|
||||
manifestType = manifest.DockerV2Schema1SignedMediaType
|
||||
case "v2s2":
|
||||
manifestType = manifest.DockerV2Schema2MediaType
|
||||
default:
|
||||
return fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci', 'v2s1', or 'v2s2'", opts.format.value)
|
||||
if opts.format.Present() {
|
||||
manifestType, err = parseManifestFormat(opts.format.Value())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -147,13 +180,116 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
|
||||
if opts.quiet {
|
||||
stdout = nil
|
||||
}
|
||||
_, err = copy.Image(ctx, policyContext, destRef, srcRef, ©.Options{
|
||||
RemoveSignatures: opts.removeSignatures,
|
||||
SignBy: opts.signByFingerprint,
|
||||
ReportWriter: stdout,
|
||||
SourceCtx: sourceCtx,
|
||||
DestinationCtx: destinationCtx,
|
||||
ForceManifestMIMEType: manifestType,
|
||||
})
|
||||
return err
|
||||
|
||||
imageListSelection := copy.CopySystemImage
|
||||
if opts.multiArch.Present() && opts.all {
|
||||
return fmt.Errorf("Cannot use --all and --multi-arch flags together")
|
||||
}
|
||||
if opts.multiArch.Present() {
|
||||
imageListSelection, err = parseMultiArch(opts.multiArch.Value())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if opts.all {
|
||||
imageListSelection = copy.CopyAllImages
|
||||
}
|
||||
|
||||
if len(opts.encryptionKeys) > 0 && len(opts.decryptionKeys) > 0 {
|
||||
return fmt.Errorf("--encryption-key and --decryption-key cannot be specified together")
|
||||
}
|
||||
|
||||
var encLayers *[]int
|
||||
var encConfig *encconfig.EncryptConfig
|
||||
var decConfig *encconfig.DecryptConfig
|
||||
|
||||
if len(opts.encryptLayer) > 0 && len(opts.encryptionKeys) == 0 {
|
||||
return fmt.Errorf("--encrypt-layer can only be used with --encryption-key")
|
||||
}
|
||||
|
||||
if len(opts.encryptionKeys) > 0 {
|
||||
// encryption
|
||||
p := opts.encryptLayer
|
||||
encLayers = &p
|
||||
encryptionKeys := opts.encryptionKeys
|
||||
ecc, err := enchelpers.CreateCryptoConfig(encryptionKeys, []string{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Invalid encryption keys: %v", err)
|
||||
}
|
||||
cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{ecc})
|
||||
encConfig = cc.EncryptConfig
|
||||
}
|
||||
|
||||
if len(opts.decryptionKeys) > 0 {
|
||||
// decryption
|
||||
decryptionKeys := opts.decryptionKeys
|
||||
dcc, err := enchelpers.CreateCryptoConfig([]string{}, decryptionKeys)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Invalid decryption keys: %v", err)
|
||||
}
|
||||
cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{dcc})
|
||||
decConfig = cc.DecryptConfig
|
||||
}
|
||||
|
||||
// c/image/copy.Image does allow creating both simple signing and sigstore signatures simultaneously,
|
||||
// with independent passphrases, but that would make the CLI probably too confusing.
|
||||
// For now, use the passphrase with either, but only one of them.
|
||||
if opts.signPassphraseFile != "" && opts.signByFingerprint != "" && opts.signBySigstorePrivateKey != "" {
|
||||
return fmt.Errorf("Only one of --sign-by and sign-by-sigstore-private-key can be used with sign-passphrase-file")
|
||||
}
|
||||
var passphrase string
|
||||
if opts.signPassphraseFile != "" {
|
||||
p, err := cli.ReadPassphraseFile(opts.signPassphraseFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
passphrase = p
|
||||
} else if opts.signBySigstorePrivateKey != "" {
|
||||
p, err := promptForPassphrase(opts.signBySigstorePrivateKey, os.Stdin, os.Stdout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
passphrase = p
|
||||
} // opts.signByFingerprint triggers a GPG-agent passphrase prompt, possibly using a more secure channel, so we usually shouldn’t prompt ourselves if no passphrase was explicitly provided.
|
||||
|
||||
var signIdentity reference.Named = nil
|
||||
if opts.signIdentity != "" {
|
||||
signIdentity, err = reference.ParseNamed(opts.signIdentity)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not parse --sign-identity: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return retry.IfNecessary(ctx, func() error {
|
||||
manifestBytes, err := copy.Image(ctx, policyContext, destRef, srcRef, ©.Options{
|
||||
RemoveSignatures: opts.removeSignatures,
|
||||
SignBy: opts.signByFingerprint,
|
||||
SignPassphrase: passphrase,
|
||||
SignBySigstorePrivateKeyFile: opts.signBySigstorePrivateKey,
|
||||
SignSigstorePrivateKeyPassphrase: []byte(passphrase),
|
||||
SignIdentity: signIdentity,
|
||||
ReportWriter: stdout,
|
||||
SourceCtx: sourceCtx,
|
||||
DestinationCtx: destinationCtx,
|
||||
ForceManifestMIMEType: manifestType,
|
||||
ImageListSelection: imageListSelection,
|
||||
PreserveDigests: opts.preserveDigests,
|
||||
OciDecryptConfig: decConfig,
|
||||
OciEncryptLayers: encLayers,
|
||||
OciEncryptConfig: encConfig,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if opts.digestFile != "" {
|
||||
manifestDigest, err := manifest.Digest(manifestBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = os.WriteFile(opts.digestFile, []byte(manifestDigest.String()), 0644); err != nil {
|
||||
return fmt.Errorf("Failed to write digest to file %q: %w", opts.digestFile, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}, opts.retryOpts)
|
||||
}
|
||||
|
||||
@@ -6,38 +6,45 @@ import (
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/transports"
|
||||
"github.com/containers/image/transports/alltransports"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/containers/common/pkg/retry"
|
||||
"github.com/containers/image/v5/transports"
|
||||
"github.com/containers/image/v5/transports/alltransports"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type deleteOptions struct {
|
||||
global *globalOptions
|
||||
image *imageOptions
|
||||
global *globalOptions
|
||||
image *imageOptions
|
||||
retryOpts *retry.Options
|
||||
}
|
||||
|
||||
func deleteCmd(global *globalOptions) cli.Command {
|
||||
func deleteCmd(global *globalOptions) *cobra.Command {
|
||||
sharedFlags, sharedOpts := sharedImageFlags()
|
||||
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
|
||||
imageFlags, imageOpts := imageFlags(global, sharedOpts, nil, "", "")
|
||||
retryFlags, retryOpts := retryFlags()
|
||||
opts := deleteOptions{
|
||||
global: global,
|
||||
image: imageOpts,
|
||||
global: global,
|
||||
image: imageOpts,
|
||||
retryOpts: retryOpts,
|
||||
}
|
||||
return cli.Command{
|
||||
Name: "delete",
|
||||
Usage: "Delete image IMAGE-NAME",
|
||||
Description: fmt.Sprintf(`
|
||||
Delete an "IMAGE_NAME" from a transport
|
||||
|
||||
Supported transports:
|
||||
%s
|
||||
|
||||
See skopeo(1) section "IMAGE NAMES" for the expected format
|
||||
`, strings.Join(transports.ListNames(), ", ")),
|
||||
ArgsUsage: "IMAGE-NAME",
|
||||
Action: commandAction(opts.run),
|
||||
Flags: append(sharedFlags, imageFlags...),
|
||||
cmd := &cobra.Command{
|
||||
Use: "delete [command options] IMAGE-NAME",
|
||||
Short: "Delete image IMAGE-NAME",
|
||||
Long: fmt.Sprintf(`Delete an "IMAGE_NAME" from a transport
|
||||
Supported transports:
|
||||
%s
|
||||
See skopeo(1) section "IMAGE NAMES" for the expected format
|
||||
`, strings.Join(transports.ListNames(), ", ")),
|
||||
RunE: commandAction(opts.run),
|
||||
Example: `skopeo delete docker://registry.example.com/example/pause:latest`,
|
||||
ValidArgsFunction: autocompleteSupportedTransports,
|
||||
}
|
||||
adjustUsage(cmd)
|
||||
flags := cmd.Flags()
|
||||
flags.AddFlagSet(&sharedFlags)
|
||||
flags.AddFlagSet(&imageFlags)
|
||||
flags.AddFlagSet(&retryFlags)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (opts *deleteOptions) run(args []string, stdout io.Writer) error {
|
||||
@@ -62,5 +69,8 @@ func (opts *deleteOptions) run(args []string, stdout io.Writer) error {
|
||||
|
||||
ctx, cancel := opts.global.commandTimeoutContext()
|
||||
defer cancel()
|
||||
return ref.DeleteImage(ctx, sys)
|
||||
|
||||
return retry.IfNecessary(ctx, func() error {
|
||||
return ref.DeleteImage(ctx, sys)
|
||||
}, opts.retryOpts)
|
||||
}
|
||||
|
||||
@@ -1,75 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// optionalBool is a boolean with a separate presence flag.
|
||||
type optionalBool struct {
|
||||
present bool
|
||||
value bool
|
||||
}
|
||||
|
||||
// optionalBool is a cli.Generic == flag.Value implementation equivalent to
|
||||
// the one underlying flag.Bool, except that it records whether the flag has been set.
|
||||
// This is distinct from optionalBool to (pretend to) force callers to use
|
||||
// newOptionalBool
|
||||
type optionalBoolValue optionalBool
|
||||
|
||||
func newOptionalBoolValue(p *optionalBool) cli.Generic {
|
||||
p.present = false
|
||||
return (*optionalBoolValue)(p)
|
||||
}
|
||||
|
||||
func (ob *optionalBoolValue) Set(s string) error {
|
||||
v, err := strconv.ParseBool(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ob.value = v
|
||||
ob.present = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ob *optionalBoolValue) String() string {
|
||||
if !ob.present {
|
||||
return "" // This is, sadly, not round-trip safe: --flag is interpreted as --flag=true
|
||||
}
|
||||
return strconv.FormatBool(ob.value)
|
||||
}
|
||||
|
||||
func (ob *optionalBoolValue) IsBoolFlag() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// optionalString is a string with a separate presence flag.
|
||||
type optionalString struct {
|
||||
present bool
|
||||
value string
|
||||
}
|
||||
|
||||
// optionalString is a cli.Generic == flag.Value implementation equivalent to
|
||||
// the one underlying flag.String, except that it records whether the flag has been set.
|
||||
// This is distinct from optionalString to (pretend to) force callers to use
|
||||
// newoptionalString
|
||||
type optionalStringValue optionalString
|
||||
|
||||
func newOptionalStringValue(p *optionalString) cli.Generic {
|
||||
p.present = false
|
||||
return (*optionalStringValue)(p)
|
||||
}
|
||||
|
||||
func (ob *optionalStringValue) Set(s string) error {
|
||||
ob.value = s
|
||||
ob.present = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ob *optionalStringValue) String() string {
|
||||
if !ob.present {
|
||||
return "" // This is, sadly, not round-trip safe: --flag= is interpreted as {present:true, value:""}
|
||||
}
|
||||
return ob.value
|
||||
}
|
||||
@@ -1,239 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func TestOptionalBoolSet(t *testing.T) {
|
||||
for _, c := range []struct {
|
||||
input string
|
||||
accepted bool
|
||||
value bool
|
||||
}{
|
||||
// Valid inputs documented for strconv.ParseBool == flag.BoolVar
|
||||
{"1", true, true},
|
||||
{"t", true, true},
|
||||
{"T", true, true},
|
||||
{"TRUE", true, true},
|
||||
{"true", true, true},
|
||||
{"True", true, true},
|
||||
{"0", true, false},
|
||||
{"f", true, false},
|
||||
{"F", true, false},
|
||||
{"FALSE", true, false},
|
||||
{"false", true, false},
|
||||
{"False", true, false},
|
||||
// A few invalid inputs
|
||||
{"", false, false},
|
||||
{"yes", false, false},
|
||||
{"no", false, false},
|
||||
{"2", false, false},
|
||||
} {
|
||||
var ob optionalBool
|
||||
v := newOptionalBoolValue(&ob)
|
||||
require.False(t, ob.present)
|
||||
err := v.Set(c.input)
|
||||
if c.accepted {
|
||||
assert.NoError(t, err, c.input)
|
||||
assert.Equal(t, c.value, ob.value)
|
||||
} else {
|
||||
assert.Error(t, err, c.input)
|
||||
assert.False(t, ob.present) // Just to be extra paranoid.
|
||||
}
|
||||
}
|
||||
|
||||
// Nothing actually explicitly says that .Set() is never called when the flag is not present on the command line;
|
||||
// so, check that it is not being called, at least in the straightforward case (it's not possible to test that it
|
||||
// is not called in any possible situation).
|
||||
var globalOB, commandOB optionalBool
|
||||
actionRun := false
|
||||
app := cli.NewApp()
|
||||
app.EnableBashCompletion = true
|
||||
app.Flags = []cli.Flag{
|
||||
cli.GenericFlag{
|
||||
Name: "global-OB",
|
||||
Value: newOptionalBoolValue(&globalOB),
|
||||
},
|
||||
}
|
||||
app.Commands = []cli.Command{{
|
||||
Name: "cmd",
|
||||
Flags: []cli.Flag{
|
||||
cli.GenericFlag{
|
||||
Name: "command-OB",
|
||||
Value: newOptionalBoolValue(&commandOB),
|
||||
},
|
||||
},
|
||||
Action: func(*cli.Context) error {
|
||||
assert.False(t, globalOB.present)
|
||||
assert.False(t, commandOB.present)
|
||||
actionRun = true
|
||||
return nil
|
||||
},
|
||||
}}
|
||||
err := app.Run([]string{"app", "cmd"})
|
||||
require.NoError(t, err)
|
||||
assert.True(t, actionRun)
|
||||
}
|
||||
|
||||
func TestOptionalBoolString(t *testing.T) {
|
||||
for _, c := range []struct {
|
||||
input optionalBool
|
||||
expected string
|
||||
}{
|
||||
{optionalBool{present: true, value: true}, "true"},
|
||||
{optionalBool{present: true, value: false}, "false"},
|
||||
{optionalBool{present: false, value: true}, ""},
|
||||
{optionalBool{present: false, value: false}, ""},
|
||||
} {
|
||||
var ob optionalBool
|
||||
v := newOptionalBoolValue(&ob)
|
||||
ob = c.input
|
||||
res := v.String()
|
||||
assert.Equal(t, c.expected, res)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOptionalBoolIsBoolFlag(t *testing.T) {
|
||||
// IsBoolFlag means that the argument value must either be part of the same argument, with =;
|
||||
// if there is no =, the value is set to true.
|
||||
// This differs form other flags, where the argument is required and may be either separated with = or supplied in the next argument.
|
||||
for _, c := range []struct {
|
||||
input []string
|
||||
expectedOB optionalBool
|
||||
expectedArgs []string
|
||||
}{
|
||||
{[]string{"1", "2"}, optionalBool{present: false}, []string{"1", "2"}}, // Flag not present
|
||||
{[]string{"--OB=true", "1", "2"}, optionalBool{present: true, value: true}, []string{"1", "2"}}, // --OB=true
|
||||
{[]string{"--OB=false", "1", "2"}, optionalBool{present: true, value: false}, []string{"1", "2"}}, // --OB=false
|
||||
{[]string{"--OB", "true", "1", "2"}, optionalBool{present: true, value: true}, []string{"true", "1", "2"}}, // --OB true
|
||||
{[]string{"--OB", "false", "1", "2"}, optionalBool{present: true, value: true}, []string{"false", "1", "2"}}, // --OB false
|
||||
} {
|
||||
var ob optionalBool
|
||||
actionRun := false
|
||||
app := cli.NewApp()
|
||||
app.Commands = []cli.Command{{
|
||||
Name: "cmd",
|
||||
Flags: []cli.Flag{
|
||||
cli.GenericFlag{
|
||||
Name: "OB",
|
||||
Value: newOptionalBoolValue(&ob),
|
||||
},
|
||||
},
|
||||
Action: func(ctx *cli.Context) error {
|
||||
assert.Equal(t, c.expectedOB, ob)
|
||||
assert.Equal(t, c.expectedArgs, ([]string)(ctx.Args()))
|
||||
actionRun = true
|
||||
return nil
|
||||
},
|
||||
}}
|
||||
err := app.Run(append([]string{"app", "cmd"}, c.input...))
|
||||
require.NoError(t, err)
|
||||
assert.True(t, actionRun)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOptionalStringSet(t *testing.T) {
	// Really just a smoke test, but differentiating between not present and empty.
	for _, c := range []string{"", "hello"} {
		var os optionalString
		v := newOptionalStringValue(&os)
		require.False(t, os.present)
		err := v.Set(c)
		assert.NoError(t, err, c)
		assert.Equal(t, c, os.value)
	}

	// Nothing actually explicitly says that .Set() is never called when the flag is not present on the command line;
	// so, check that it is not being called, at least in the straightforward case (it's not possible to test that it
	// is not called in any possible situation).
	var globalOS, commandOS optionalString
	actionRun := false
	app := cli.NewApp()
	app.EnableBashCompletion = true
	app.Flags = []cli.Flag{
		cli.GenericFlag{
			Name:  "global-OS",
			Value: newOptionalStringValue(&globalOS),
		},
	}
	app.Commands = []cli.Command{{
		Name: "cmd",
		Flags: []cli.Flag{
			cli.GenericFlag{
				Name:  "command-OS",
				Value: newOptionalStringValue(&commandOS),
			},
		},
		Action: func(*cli.Context) error {
			assert.False(t, globalOS.present)
			assert.False(t, commandOS.present)
			actionRun = true
			return nil
		},
	}}
	err := app.Run([]string{"app", "cmd"})
	require.NoError(t, err)
	assert.True(t, actionRun)
}

func TestOptionalStringString(t *testing.T) {
	for _, c := range []struct {
		input    optionalString
		expected string
	}{
		{optionalString{present: true, value: "hello"}, "hello"},
		{optionalString{present: true, value: ""}, ""},
		{optionalString{present: false, value: "hello"}, ""},
		{optionalString{present: false, value: ""}, ""},
	} {
		var os optionalString
		v := newOptionalStringValue(&os)
		os = c.input
		res := v.String()
		assert.Equal(t, c.expected, res)
	}
}

func TestOptionalStringIsBoolFlag(t *testing.T) {
	// NOTE: optionalStringValue does not implement IsBoolFlag!
	// IsBoolFlag means that the argument value must either be part of the same argument, with =;
	// if there is no =, the value is set to true.
	// This differs from other flags, where the argument is required and may be either separated with = or supplied in the next argument.
	for _, c := range []struct {
		input        []string
		expectedOS   optionalString
		expectedArgs []string
	}{
		{[]string{"1", "2"}, optionalString{present: false}, []string{"1", "2"}},                                 // Flag not present
		{[]string{"--OS=hello", "1", "2"}, optionalString{present: true, value: "hello"}, []string{"1", "2"}},    // --OS=hello
		{[]string{"--OS=", "1", "2"}, optionalString{present: true, value: ""}, []string{"1", "2"}},              // --OS=
		{[]string{"--OS", "hello", "1", "2"}, optionalString{present: true, value: "hello"}, []string{"1", "2"}}, // --OS hello
		{[]string{"--OS", "", "1", "2"}, optionalString{present: true, value: ""}, []string{"1", "2"}},           // --OS ""
	} {
		var os optionalString
		actionRun := false
		app := cli.NewApp()
		app.Commands = []cli.Command{{
			Name: "cmd",
			Flags: []cli.Flag{
				cli.GenericFlag{
					Name:  "OS",
					Value: newOptionalStringValue(&os),
				},
			},
			Action: func(ctx *cli.Context) error {
				assert.Equal(t, c.expectedOS, os)
				assert.Equal(t, c.expectedArgs, ([]string)(ctx.Args()))
				actionRun = true
				return nil
			},
		}}
		err := app.Run(append([]string{"app", "cmd"}, c.input...))
		require.NoError(t, err)
		assert.True(t, actionRun)
	}
}
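The IsBoolFlag behaviour referenced in the comment above can also be seen with the standard library flag package, which uses the same rule. The sketch below is illustrative only (not skopeo code; the type names are made up): a value implementing IsBoolFlag() does not consume the following argument, while an ordinary flag.Value does.

package main

import (
	"flag"
	"fmt"
)

// boolish implements flag.Value and additionally IsBoolFlag(), so "-b x" sets it to "true"
// and leaves "x" as a positional argument.
type boolish struct{ v string }

func (b *boolish) String() string     { return b.v }
func (b *boolish) Set(s string) error { b.v = s; return nil }
func (b *boolish) IsBoolFlag() bool   { return true }

// stringish is a plain flag.Value, so "-s x" consumes "x" as the flag's value.
type stringish struct{ v string }

func (s *stringish) String() string       { return s.v }
func (s *stringish) Set(val string) error { s.v = val; return nil }

func main() {
	fs1 := flag.NewFlagSet("demo1", flag.ContinueOnError)
	var b boolish
	fs1.Var(&b, "b", "bool-like flag")
	_ = fs1.Parse([]string{"-b", "x"})
	fmt.Println(b.v, fs1.Args()) // true [x]

	fs2 := flag.NewFlagSet("demo2", flag.ContinueOnError)
	var s stringish
	fs2.Var(&s, "s", "string flag")
	_ = fs2.Parse([]string{"-s", "x"})
	fmt.Println(s.v, fs2.Args()) // x []
}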
|
||||
@@ -2,155 +2,200 @@ package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
"text/tabwriter"
|
||||
"text/template"
|
||||
|
||||
"github.com/containers/image/docker"
|
||||
"github.com/containers/image/manifest"
|
||||
"github.com/containers/image/transports"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/containers/common/pkg/report"
|
||||
"github.com/containers/common/pkg/retry"
|
||||
"github.com/containers/image/v5/docker"
|
||||
"github.com/containers/image/v5/image"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/transports"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/skopeo/cmd/skopeo/inspect"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// inspectOutput is the output format of (skopeo inspect), primarily so that we can format it with a simple json.MarshalIndent.
|
||||
type inspectOutput struct {
|
||||
Name string `json:",omitempty"`
|
||||
Tag string `json:",omitempty"`
|
||||
Digest digest.Digest
|
||||
RepoTags []string
|
||||
Created *time.Time
|
||||
DockerVersion string
|
||||
Labels map[string]string
|
||||
Architecture string
|
||||
Os string
|
||||
Layers []string
|
||||
}
|
||||
|
||||
type inspectOptions struct {
|
||||
global *globalOptions
|
||||
image *imageOptions
|
||||
raw bool // Output the raw manifest instead of parsing information about the image
|
||||
config bool // Output the raw config blob instead of parsing information about the image
|
||||
global *globalOptions
|
||||
image *imageOptions
|
||||
retryOpts *retry.Options
|
||||
format string
|
||||
raw bool // Output the raw manifest instead of parsing information about the image
|
||||
config bool // Output the raw config blob instead of parsing information about the image
|
||||
doNotListTags bool // Do not list all tags available in the same repository
|
||||
}
|
||||
|
||||
func inspectCmd(global *globalOptions) cli.Command {
|
||||
func inspectCmd(global *globalOptions) *cobra.Command {
|
||||
sharedFlags, sharedOpts := sharedImageFlags()
|
||||
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
|
||||
imageFlags, imageOpts := imageFlags(global, sharedOpts, nil, "", "")
|
||||
retryFlags, retryOpts := retryFlags()
|
||||
opts := inspectOptions{
|
||||
global: global,
|
||||
image: imageOpts,
|
||||
global: global,
|
||||
image: imageOpts,
|
||||
retryOpts: retryOpts,
|
||||
}
|
||||
return cli.Command{
|
||||
Name: "inspect",
|
||||
Usage: "Inspect image IMAGE-NAME",
|
||||
Description: fmt.Sprintf(`
|
||||
Return low-level information about "IMAGE-NAME" in a registry/transport
|
||||
cmd := &cobra.Command{
|
||||
Use: "inspect [command options] IMAGE-NAME",
|
||||
Short: "Inspect image IMAGE-NAME",
|
||||
Long: fmt.Sprintf(`Return low-level information about "IMAGE-NAME" in a registry/transport
|
||||
Supported transports:
|
||||
%s
|
||||
|
||||
Supported transports:
|
||||
%s
|
||||
|
||||
See skopeo(1) section "IMAGE NAMES" for the expected format
|
||||
`, strings.Join(transports.ListNames(), ", ")),
|
||||
ArgsUsage: "IMAGE-NAME",
|
||||
Flags: append(append([]cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "raw",
|
||||
Usage: "output raw manifest or configuration",
|
||||
Destination: &opts.raw,
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "config",
|
||||
Usage: "output configuration",
|
||||
Destination: &opts.config,
|
||||
},
|
||||
}, sharedFlags...), imageFlags...),
|
||||
Action: commandAction(opts.run),
|
||||
See skopeo(1) section "IMAGE NAMES" for the expected format
|
||||
`, strings.Join(transports.ListNames(), ", ")),
|
||||
RunE: commandAction(opts.run),
|
||||
Example: `skopeo inspect docker://registry.fedoraproject.org/fedora
|
||||
skopeo inspect --config docker://docker.io/alpine
|
||||
skopeo inspect --format "Name: {{.Name}} Digest: {{.Digest}}" docker://registry.access.redhat.com/ubi8`,
|
||||
ValidArgsFunction: autocompleteSupportedTransports,
|
||||
}
|
||||
adjustUsage(cmd)
|
||||
flags := cmd.Flags()
|
||||
flags.BoolVar(&opts.raw, "raw", false, "output raw manifest or configuration")
|
||||
flags.BoolVar(&opts.config, "config", false, "output configuration")
|
||||
flags.StringVarP(&opts.format, "format", "f", "", "Format the output to a Go template")
|
||||
flags.BoolVarP(&opts.doNotListTags, "no-tags", "n", false, "Do not list the available tags from the repository in the output")
|
||||
flags.AddFlagSet(&sharedFlags)
|
||||
flags.AddFlagSet(&imageFlags)
|
||||
flags.AddFlagSet(&retryFlags)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error) {
|
||||
var (
|
||||
rawManifest []byte
|
||||
src types.ImageSource
|
||||
imgInspect *types.ImageInspectInfo
|
||||
data []interface{}
|
||||
)
|
||||
ctx, cancel := opts.global.commandTimeoutContext()
|
||||
defer cancel()
|
||||
|
||||
if len(args) != 1 {
|
||||
return errors.New("Exactly one argument expected")
|
||||
}
|
||||
if opts.raw && opts.format != "" {
|
||||
return errors.New("raw output does not support format option")
|
||||
}
|
||||
imageName := args[0]
|
||||
|
||||
if err := reexecIfNecessaryForImages(imageName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
img, err := parseImage(ctx, opts.image, imageName)
|
||||
sys, err := opts.image.newSystemContext()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := retry.IfNecessary(ctx, func() error {
|
||||
src, err = parseImageSource(ctx, opts.image, imageName)
|
||||
return err
|
||||
}, opts.retryOpts); err != nil {
|
||||
return fmt.Errorf("Error parsing image name %q: %w", imageName, err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := img.Close(); err != nil {
|
||||
retErr = errors.Wrapf(retErr, fmt.Sprintf("(could not close image: %v) ", err))
|
||||
if err := src.Close(); err != nil {
|
||||
retErr = noteCloseFailure(retErr, "closing image", err)
|
||||
}
|
||||
}()
|
||||
|
||||
rawManifest, _, err := img.Manifest(ctx)
|
||||
if err != nil {
|
||||
if err := retry.IfNecessary(ctx, func() error {
|
||||
rawManifest, _, err = src.GetManifest(ctx, nil)
|
||||
return err
|
||||
}, opts.retryOpts); err != nil {
|
||||
return fmt.Errorf("Error retrieving manifest for image: %w", err)
|
||||
}
|
||||
if opts.config && opts.raw {
|
||||
configBlob, err := img.ConfigBlob(ctx)
|
||||
|
||||
if opts.raw && !opts.config {
|
||||
_, err := stdout.Write(rawManifest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading configuration blob: %v", err)
|
||||
return fmt.Errorf("Error writing manifest to standard output: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
img, err := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(src, nil))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error parsing manifest for image: %w", err)
|
||||
}
|
||||
|
||||
if opts.config && opts.raw {
|
||||
var configBlob []byte
|
||||
if err := retry.IfNecessary(ctx, func() error {
|
||||
configBlob, err = img.ConfigBlob(ctx)
|
||||
return err
|
||||
}, opts.retryOpts); err != nil {
|
||||
return fmt.Errorf("Error reading configuration blob: %w", err)
|
||||
}
|
||||
_, err = stdout.Write(configBlob)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error writing configuration blob to standard output: %v", err)
|
||||
}
|
||||
return nil
|
||||
} else if opts.raw {
|
||||
_, err := stdout.Write(rawManifest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error writing manifest to standard output: %v", err)
|
||||
return fmt.Errorf("Error writing configuration blob to standard output: %w", err)
|
||||
}
|
||||
return nil
|
||||
} else if opts.config {
|
||||
config, err := img.OCIConfig(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading OCI-formatted configuration data: %v", err)
|
||||
var config *v1.Image
|
||||
if err := retry.IfNecessary(ctx, func() error {
|
||||
config, err = img.OCIConfig(ctx)
|
||||
return err
|
||||
}, opts.retryOpts); err != nil {
|
||||
return fmt.Errorf("Error reading OCI-formatted configuration data: %w", err)
|
||||
}
|
||||
if report.IsJSON(opts.format) || opts.format == "" {
|
||||
var out []byte
|
||||
out, err = json.MarshalIndent(config, "", " ")
|
||||
if err == nil {
|
||||
fmt.Fprintf(stdout, "%s\n", string(out))
|
||||
}
|
||||
} else {
|
||||
row := "{{range . }}" + report.NormalizeFormat(opts.format) + "{{end}}"
|
||||
data = append(data, config)
|
||||
err = printTmpl(row, data)
|
||||
}
|
||||
err = json.NewEncoder(stdout).Encode(config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error writing OCI-formatted configuration data to standard output: %v", err)
|
||||
return fmt.Errorf("Error writing OCI-formatted configuration data to standard output: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
imgInspect, err := img.Inspect(ctx)
|
||||
if err != nil {
|
||||
|
||||
if err := retry.IfNecessary(ctx, func() error {
|
||||
imgInspect, err = img.Inspect(ctx)
|
||||
return err
|
||||
}, opts.retryOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
outputData := inspectOutput{
|
||||
|
||||
outputData := inspect.Output{
|
||||
Name: "", // Set below if DockerReference() is known
|
||||
Tag: imgInspect.Tag,
|
||||
// Digest is set below.
|
||||
RepoTags: []string{}, // Possibly overriden for docker.Transport.
|
||||
RepoTags: []string{}, // Possibly overridden for docker.Transport.
|
||||
Created: imgInspect.Created,
|
||||
DockerVersion: imgInspect.DockerVersion,
|
||||
Labels: imgInspect.Labels,
|
||||
Architecture: imgInspect.Architecture,
|
||||
Os: imgInspect.Os,
|
||||
Layers: imgInspect.Layers,
|
||||
Env: imgInspect.Env,
|
||||
}
|
||||
outputData.Digest, err = manifest.Digest(rawManifest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error computing manifest digest: %v", err)
|
||||
return fmt.Errorf("Error computing manifest digest: %w", err)
|
||||
}
|
||||
if dockerRef := img.Reference().DockerReference(); dockerRef != nil {
|
||||
outputData.Name = dockerRef.Name()
|
||||
}
|
||||
if img.Reference().Transport() == docker.Transport {
|
||||
if !opts.doNotListTags && img.Reference().Transport() == docker.Transport {
|
||||
sys, err := opts.image.newSystemContext()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -160,16 +205,31 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
|
||||
// some registries may decide to block the "list all tags" endpoint
|
||||
// gracefully allow the inspect to continue in this case. Currently
|
||||
// the IBM Bluemix container registry has this restriction.
|
||||
if !strings.Contains(err.Error(), "401") {
|
||||
return fmt.Errorf("Error determining repository tags: %v", err)
|
||||
// In addition, AWS ECR rejects it with 403 (Forbidden) if the "ecr:ListImages"
|
||||
// action is not allowed.
|
||||
if !strings.Contains(err.Error(), "401") && !strings.Contains(err.Error(), "403") {
|
||||
return fmt.Errorf("Error determining repository tags: %w", err)
|
||||
}
|
||||
logrus.Warnf("Registry disallows tag list retrieval; skipping")
|
||||
}
|
||||
}
|
||||
out, err := json.MarshalIndent(outputData, "", " ")
|
||||
if report.IsJSON(opts.format) || opts.format == "" {
|
||||
out, err := json.MarshalIndent(outputData, "", " ")
|
||||
if err == nil {
|
||||
fmt.Fprintf(stdout, "%s\n", string(out))
|
||||
}
|
||||
return err
|
||||
}
|
||||
row := "{{range . }}" + report.NormalizeFormat(opts.format) + "{{end}}"
|
||||
data = append(data, outputData)
|
||||
return printTmpl(row, data)
|
||||
}
|
||||
|
||||
func printTmpl(row string, data []interface{}) error {
|
||||
t, err := template.New("skopeo inspect").Parse(row)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(stdout, "%s\n", string(out))
|
||||
return nil
|
||||
w := tabwriter.NewWriter(os.Stdout, 8, 2, 2, ' ', 0)
|
||||
return t.Execute(w, data)
|
||||
}
|
||||
|
||||
23  cmd/skopeo/inspect/output.go  Normal file
@@ -0,0 +1,23 @@
package inspect

import (
	"time"

	digest "github.com/opencontainers/go-digest"
)

// Output is the output format of (skopeo inspect),
// primarily so that we can format it with a simple json.MarshalIndent.
type Output struct {
	Name          string `json:",omitempty"`
	Tag           string `json:",omitempty"`
	Digest        digest.Digest
	RepoTags      []string
	Created       *time.Time
	DockerVersion string
	Labels        map[string]string
	Architecture  string
	Os            string
	Layers        []string
	Env           []string
}

@@ -1,41 +1,48 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/directory"
|
||||
"github.com/containers/image/image"
|
||||
"github.com/containers/image/pkg/blobinfocache"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/common/pkg/retry"
|
||||
"github.com/containers/image/v5/directory"
|
||||
"github.com/containers/image/v5/image"
|
||||
"github.com/containers/image/v5/pkg/blobinfocache"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type layersOptions struct {
|
||||
global *globalOptions
|
||||
image *imageOptions
|
||||
global *globalOptions
|
||||
image *imageOptions
|
||||
retryOpts *retry.Options
|
||||
}
|
||||
|
||||
func layersCmd(global *globalOptions) cli.Command {
|
||||
func layersCmd(global *globalOptions) *cobra.Command {
|
||||
sharedFlags, sharedOpts := sharedImageFlags()
|
||||
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
|
||||
imageFlags, imageOpts := imageFlags(global, sharedOpts, nil, "", "")
|
||||
retryFlags, retryOpts := retryFlags()
|
||||
opts := layersOptions{
|
||||
global: global,
|
||||
image: imageOpts,
|
||||
global: global,
|
||||
image: imageOpts,
|
||||
retryOpts: retryOpts,
|
||||
}
|
||||
return cli.Command{
|
||||
Name: "layers",
|
||||
Usage: "Get layers of IMAGE-NAME",
|
||||
ArgsUsage: "IMAGE-NAME [LAYER...]",
|
||||
Hidden: true,
|
||||
Action: commandAction(opts.run),
|
||||
Flags: append(sharedFlags, imageFlags...),
|
||||
cmd := &cobra.Command{
|
||||
Hidden: true,
|
||||
Use: "layers [command options] IMAGE-NAME [LAYER...]",
|
||||
Short: "Get layers of IMAGE-NAME",
|
||||
RunE: commandAction(opts.run),
|
||||
}
|
||||
adjustUsage(cmd)
|
||||
flags := cmd.Flags()
|
||||
flags.AddFlagSet(&sharedFlags)
|
||||
flags.AddFlagSet(&imageFlags)
|
||||
flags.AddFlagSet(&retryFlags)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
|
||||
@@ -57,21 +64,29 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
|
||||
return err
|
||||
}
|
||||
cache := blobinfocache.DefaultCache(sys)
|
||||
rawSource, err := parseImageSource(ctx, opts.image, imageName)
|
||||
if err != nil {
|
||||
var (
|
||||
rawSource types.ImageSource
|
||||
src types.ImageCloser
|
||||
)
|
||||
if err = retry.IfNecessary(ctx, func() error {
|
||||
rawSource, err = parseImageSource(ctx, opts.image, imageName)
|
||||
return err
|
||||
}, opts.retryOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
src, err := image.FromSource(ctx, sys, rawSource)
|
||||
if err != nil {
|
||||
if err = retry.IfNecessary(ctx, func() error {
|
||||
src, err = image.FromSource(ctx, sys, rawSource)
|
||||
return err
|
||||
}, opts.retryOpts); err != nil {
|
||||
if closeErr := rawSource.Close(); closeErr != nil {
|
||||
return errors.Wrapf(err, " (close error: %v)", closeErr)
|
||||
return fmt.Errorf("%w (closing image source: %v)", err, closeErr)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err := src.Close(); err != nil {
|
||||
retErr = errors.Wrapf(retErr, " (close error: %v)", err)
|
||||
retErr = noteCloseFailure(retErr, "closing image", err)
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -106,7 +121,7 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
|
||||
}
|
||||
}
|
||||
|
||||
tmpDir, err := ioutil.TempDir(".", "layers-")
|
||||
tmpDir, err := os.MkdirTemp(".", "layers-")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -121,30 +136,39 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
|
||||
|
||||
defer func() {
|
||||
if err := dest.Close(); err != nil {
|
||||
retErr = errors.Wrapf(retErr, " (close error: %v)", err)
|
||||
retErr = noteCloseFailure(retErr, "closing destination", err)
|
||||
}
|
||||
}()
|
||||
|
||||
for _, bd := range blobDigests {
|
||||
r, blobSize, err := rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1}, cache)
|
||||
if err != nil {
|
||||
var (
|
||||
r io.ReadCloser
|
||||
blobSize int64
|
||||
)
|
||||
if err = retry.IfNecessary(ctx, func() error {
|
||||
r, blobSize, err = rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1}, cache)
|
||||
return err
|
||||
}, opts.retryOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := dest.PutBlob(ctx, r, types.BlobInfo{Digest: bd.digest, Size: blobSize}, cache, bd.isConfig); err != nil {
|
||||
if closeErr := r.Close(); closeErr != nil {
|
||||
return errors.Wrapf(err, " (close error: %v)", closeErr)
|
||||
return fmt.Errorf("%w (close error: %v)", err, closeErr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
manifest, _, err := src.Manifest(ctx)
|
||||
if err != nil {
|
||||
var manifest []byte
|
||||
if err = retry.IfNecessary(ctx, func() error {
|
||||
manifest, _, err = src.Manifest(ctx)
|
||||
return err
|
||||
}, opts.retryOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := dest.PutManifest(ctx, manifest); err != nil {
|
||||
if err := dest.PutManifest(ctx, manifest, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return dest.Commit(ctx)
|
||||
return dest.Commit(ctx, image.UnparsedInstance(rawSource, nil))
|
||||
}
|
||||
|
||||
211  cmd/skopeo/list_tags.go  Normal file
@@ -0,0 +1,211 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/common/pkg/retry"
|
||||
"github.com/containers/image/v5/docker"
|
||||
"github.com/containers/image/v5/docker/archive"
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/transports/alltransports"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// tagListOutput is the output format of (skopeo list-tags), primarily so that we can format it with a simple json.MarshalIndent.
|
||||
type tagListOutput struct {
|
||||
Repository string `json:",omitempty"`
|
||||
Tags []string
|
||||
}
|
||||
|
||||
type tagsOptions struct {
|
||||
global *globalOptions
|
||||
image *imageOptions
|
||||
retryOpts *retry.Options
|
||||
}
|
||||
|
||||
var transportHandlers = map[string]func(ctx context.Context, sys *types.SystemContext, opts *tagsOptions, userInput string) (repositoryName string, tagListing []string, err error){
|
||||
docker.Transport.Name(): listDockerRepoTags,
|
||||
archive.Transport.Name(): listDockerArchiveTags,
|
||||
}
|
||||
|
||||
// supportedTransports returns all the supported transports
|
||||
func supportedTransports(joinStr string) string {
|
||||
res := make([]string, 0, len(transportHandlers))
|
||||
for handlerName := range transportHandlers {
|
||||
res = append(res, handlerName)
|
||||
}
|
||||
sort.Strings(res)
|
||||
return strings.Join(res, joinStr)
|
||||
}
|
||||
|
||||
func tagsCmd(global *globalOptions) *cobra.Command {
|
||||
sharedFlags, sharedOpts := sharedImageFlags()
|
||||
imageFlags, imageOpts := dockerImageFlags(global, sharedOpts, nil, "", "")
|
||||
retryFlags, retryOpts := retryFlags()
|
||||
|
||||
opts := tagsOptions{
|
||||
global: global,
|
||||
image: imageOpts,
|
||||
retryOpts: retryOpts,
|
||||
}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "list-tags [command options] SOURCE-IMAGE",
|
||||
Short: "List tags in the transport/repository specified by the SOURCE-IMAGE",
|
||||
Long: `Return the list of tags from the transport/repository "SOURCE-IMAGE"
|
||||
|
||||
Supported transports:
|
||||
` + supportedTransports(" ") + `
|
||||
|
||||
See skopeo-list-tags(1) section "REPOSITORY NAMES" for the expected format
|
||||
`,
|
||||
RunE: commandAction(opts.run),
|
||||
Example: `skopeo list-tags docker://docker.io/fedora`,
|
||||
}
|
||||
adjustUsage(cmd)
|
||||
flags := cmd.Flags()
|
||||
flags.AddFlagSet(&sharedFlags)
|
||||
flags.AddFlagSet(&imageFlags)
|
||||
flags.AddFlagSet(&retryFlags)
|
||||
return cmd
|
||||
}
|
||||
|
||||
// Customized version of the alltransports.ParseImageName and docker.ParseReference that does not place a default tag in the reference
|
||||
// Would really love to not have this, but needed to enforce tag-less and digest-less names
|
||||
func parseDockerRepositoryReference(refString string) (types.ImageReference, error) {
|
||||
if !strings.HasPrefix(refString, docker.Transport.Name()+"://") {
|
||||
return nil, fmt.Errorf("docker: image reference %s does not start with %s://", refString, docker.Transport.Name())
|
||||
}
|
||||
|
||||
parts := strings.SplitN(refString, ":", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, refString)
|
||||
}
|
||||
|
||||
ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(parts[1], "//"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !reference.IsNameOnly(ref) {
|
||||
return nil, errors.New(`No tag or digest allowed in reference`)
|
||||
}
|
||||
|
||||
// Checks ok, now return a reference. This is a hack because the tag listing code expects a full image reference even though the tag is ignored
|
||||
return docker.NewReference(reference.TagNameOnly(ref))
|
||||
}
|
||||
|
||||
// List the tags from a repository contained in the imgRef reference. Any tag value in the reference is ignored
|
||||
func listDockerTags(ctx context.Context, sys *types.SystemContext, imgRef types.ImageReference) (string, []string, error) {
|
||||
repositoryName := imgRef.DockerReference().Name()
|
||||
|
||||
tags, err := docker.GetRepositoryTags(ctx, sys, imgRef)
|
||||
if err != nil {
|
||||
return ``, nil, fmt.Errorf("Error listing repository tags: %w", err)
|
||||
}
|
||||
return repositoryName, tags, nil
|
||||
}
|
||||
|
||||
// return the tagLists from a docker repo
|
||||
func listDockerRepoTags(ctx context.Context, sys *types.SystemContext, opts *tagsOptions, userInput string) (repositoryName string, tagListing []string, err error) {
|
||||
// Do transport-specific parsing and validation to get an image reference
|
||||
imgRef, err := parseDockerRepositoryReference(userInput)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = retry.IfNecessary(ctx, func() error {
|
||||
repositoryName, tagListing, err = listDockerTags(ctx, sys, imgRef)
|
||||
return err
|
||||
}, opts.retryOpts); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// return the tagLists from a docker archive file
|
||||
func listDockerArchiveTags(ctx context.Context, sys *types.SystemContext, opts *tagsOptions, userInput string) (repositoryName string, tagListing []string, err error) {
|
||||
ref, err := alltransports.ParseImageName(userInput)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
tarReader, _, err := archive.NewReaderForReference(sys, ref)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer tarReader.Close()
|
||||
|
||||
imageRefs, err := tarReader.List()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var repoTags []string
|
||||
for imageIndex, items := range imageRefs {
|
||||
for _, ref := range items {
|
||||
repoTags, err = tarReader.ManifestTagsForReference(ref)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// handle for each untagged image
|
||||
if len(repoTags) == 0 {
|
||||
repoTags = []string{fmt.Sprintf("@%d", imageIndex)}
|
||||
}
|
||||
tagListing = append(tagListing, repoTags...)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (opts *tagsOptions) run(args []string, stdout io.Writer) (retErr error) {
|
||||
ctx, cancel := opts.global.commandTimeoutContext()
|
||||
defer cancel()
|
||||
|
||||
if len(args) != 1 {
|
||||
return errorShouldDisplayUsage{errors.New("Exactly one non-option argument expected")}
|
||||
}
|
||||
|
||||
sys, err := opts.image.newSystemContext()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
transport := alltransports.TransportFromImageName(args[0])
|
||||
if transport == nil {
|
||||
return fmt.Errorf("Invalid %q: does not specify a transport", args[0])
|
||||
}
|
||||
|
||||
var repositoryName string
|
||||
var tagListing []string
|
||||
|
||||
if val, ok := transportHandlers[transport.Name()]; ok {
|
||||
repositoryName, tagListing, err = val(ctx, sys, opts, args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("Unsupported transport '%s' for tag listing. Only supported: %s",
|
||||
transport.Name(), supportedTransports(", "))
|
||||
}
|
||||
|
||||
outputData := tagListOutput{
|
||||
Repository: repositoryName,
|
||||
Tags: tagListing,
|
||||
}
|
||||
|
||||
out, err := json.MarshalIndent(outputData, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = fmt.Fprintf(stdout, "%s\n", string(out))
|
||||
|
||||
return err
|
||||
}
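For reference, the JSON that run() writes to stdout is simply tagListOutput passed through json.MarshalIndent. The standalone sketch below reproduces that output shape; the anonymous struct stands in for the unexported tagListOutput type and the repository name and tags are made-up example values.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Mirror of tagListOutput: repository name plus the flat list of tags.
	out := struct {
		Repository string `json:",omitempty"`
		Tags       []string
	}{
		Repository: "docker.io/library/fedora",
		Tags:       []string{"34", "35", "latest"},
	}
	b, _ := json.MarshalIndent(out, "", "    ")
	fmt.Printf("%s\n", b)
	// Output:
	// {
	//     "Repository": "docker.io/library/fedora",
	//     "Tags": [
	//         "34",
	//         "35",
	//         "latest"
	//     ]
	// }
}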
|
||||
58  cmd/skopeo/list_tags_test.go  Normal file
@@ -0,0 +1,58 @@
package main

import (
	"testing"

	"github.com/containers/image/v5/transports/alltransports"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Tests the kinds of inputs allowed and expected to the command
func TestDockerRepositoryReferenceParser(t *testing.T) {
	for _, test := range [][]string{
		{"docker://myhost.com:1000/nginx"}, // no tag
		{"docker://myhost.com/nginx"},      // no port or tag
		{"docker://somehost.com"},          // Valid default expansion
		{"docker://nginx"},                 // Valid default expansion
	} {
		ref, err := parseDockerRepositoryReference(test[0])
		require.NoError(t, err)
		expected, err := alltransports.ParseImageName(test[0])
		require.NoError(t, err)
		assert.Equal(t, expected.DockerReference().Name(), ref.DockerReference().Name(), "Mismatched parse result for input %v", test[0])
	}

	for _, test := range [][]string{
		{"oci://somedir"},
		{"dir:/somepath"},
		{"docker-archive:/tmp/dir"},
		{"container-storage:myhost.com/someimage"},
		{"docker-daemon:myhost.com/someimage"},
		{"docker://myhost.com:1000/nginx:foobar:foobar"},            // Invalid repository ref
		{"docker://somehost.com:5000/"},                             // no repo
		{"docker://myhost.com:1000/nginx:latest"},                   // tag not allowed
		{"docker://myhost.com:1000/nginx@sha256:abcdef1234567890"}, // digest not allowed
	} {
		_, err := parseDockerRepositoryReference(test[0])
		assert.Error(t, err, test[0])
	}
}

func TestDockerRepositoryReferenceParserDrift(t *testing.T) {
	for _, test := range [][]string{
		{"docker://myhost.com:1000/nginx", "myhost.com:1000/nginx"}, // no tag
		{"docker://myhost.com/nginx", "myhost.com/nginx"},           // no port or tag
		{"docker://somehost.com", "docker.io/library/somehost.com"}, // Valid default expansion
		{"docker://nginx", "docker.io/library/nginx"},               // Valid default expansion
	} {
		ref, err := parseDockerRepositoryReference(test[0])
		ref2, err2 := alltransports.ParseImageName(test[0])

		if assert.NoError(t, err, "Could not parse, got error on %v", test[0]) && assert.NoError(t, err2, "Could not parse with regular parser, got error on %v", test[0]) {
			assert.Equal(t, ref.DockerReference().String(), ref2.DockerReference().String(), "Different parsing output for input %v. Repo parse = %v, regular parser = %v", test[0], ref, ref2)
		}
	}
}

48  cmd/skopeo/login.go  Normal file
@@ -0,0 +1,48 @@
package main

import (
	"io"
	"os"

	"github.com/containers/common/pkg/auth"
	commonFlag "github.com/containers/common/pkg/flag"
	"github.com/containers/image/v5/types"
	"github.com/spf13/cobra"
)

type loginOptions struct {
	global    *globalOptions
	loginOpts auth.LoginOptions
	tlsVerify commonFlag.OptionalBool
}

func loginCmd(global *globalOptions) *cobra.Command {
	opts := loginOptions{
		global: global,
	}
	cmd := &cobra.Command{
		Use:     "login [command options] REGISTRY",
		Short:   "Login to a container registry",
		Long:    "Login to a container registry on a specified server.",
		RunE:    commandAction(opts.run),
		Example: `skopeo login quay.io`,
	}
	adjustUsage(cmd)
	flags := cmd.Flags()
	commonFlag.OptionalBoolFlag(flags, &opts.tlsVerify, "tls-verify", "require HTTPS and verify certificates when accessing the registry")
	flags.AddFlagSet(auth.GetLoginFlags(&opts.loginOpts))
	return cmd
}

func (opts *loginOptions) run(args []string, stdout io.Writer) error {
	ctx, cancel := opts.global.commandTimeoutContext()
	defer cancel()
	opts.loginOpts.Stdout = stdout
	opts.loginOpts.Stdin = os.Stdin
	opts.loginOpts.AcceptRepositories = true
	sys := opts.global.newSystemContext()
	if opts.tlsVerify.Present() {
		sys.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.Value())
	}
	return auth.Login(ctx, sys, &opts.loginOpts, args)
}

44  cmd/skopeo/logout.go  Normal file
@@ -0,0 +1,44 @@
package main

import (
	"io"

	"github.com/containers/common/pkg/auth"
	commonFlag "github.com/containers/common/pkg/flag"
	"github.com/containers/image/v5/types"
	"github.com/spf13/cobra"
)

type logoutOptions struct {
	global     *globalOptions
	logoutOpts auth.LogoutOptions
	tlsVerify  commonFlag.OptionalBool
}

func logoutCmd(global *globalOptions) *cobra.Command {
	opts := logoutOptions{
		global: global,
	}
	cmd := &cobra.Command{
		Use:     "logout [command options] REGISTRY",
		Short:   "Logout of a container registry",
		Long:    "Logout of a container registry on a specified server.",
		RunE:    commandAction(opts.run),
		Example: `skopeo logout quay.io`,
	}
	adjustUsage(cmd)
	flags := cmd.Flags()
	commonFlag.OptionalBoolFlag(flags, &opts.tlsVerify, "tls-verify", "require HTTPS and verify certificates when accessing the registry")
	flags.AddFlagSet(auth.GetLogoutFlags(&opts.logoutOpts))
	return cmd
}

func (opts *logoutOptions) run(args []string, stdout io.Writer) error {
	opts.logoutOpts.Stdout = stdout
	opts.logoutOpts.AcceptRepositories = true
	sys := opts.global.newSystemContext()
	if opts.tlsVerify.Present() {
		sys.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.Value())
	}
	return auth.Logout(sys, &opts.logoutOpts, args)
}

@@ -3,114 +3,122 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/signature"
|
||||
commonFlag "github.com/containers/common/pkg/flag"
|
||||
"github.com/containers/image/v5/signature"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/skopeo/version"
|
||||
"github.com/containers/storage/pkg/reexec"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// gitCommit will be the hash that the binary was built from
|
||||
// and will be populated by the Makefile
|
||||
var gitCommit = ""
|
||||
|
||||
var defaultUserAgent = "skopeo/" + version.Version
|
||||
|
||||
type globalOptions struct {
|
||||
debug bool // Enable debug output
|
||||
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
|
||||
policyPath string // Path to a signature verification policy file
|
||||
insecurePolicy bool // Use an "allow everything" signature verification policy
|
||||
registriesDirPath string // Path to a "registries.d" registry configuratio directory
|
||||
overrideArch string // Architecture to use for choosing images, instead of the runtime one
|
||||
overrideOS string // OS to use for choosing images, instead of the runtime one
|
||||
commandTimeout time.Duration // Timeout for the command execution
|
||||
registriesConfPath string // Path to the "registries.conf" file
|
||||
debug bool // Enable debug output
|
||||
tlsVerify commonFlag.OptionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
|
||||
policyPath string // Path to a signature verification policy file
|
||||
insecurePolicy bool // Use an "allow everything" signature verification policy
|
||||
registriesDirPath string // Path to a "registries.d" registry configuration directory
|
||||
overrideArch string // Architecture to use for choosing images, instead of the runtime one
|
||||
overrideOS string // OS to use for choosing images, instead of the runtime one
|
||||
overrideVariant string // Architecture variant to use for choosing images, instead of the runtime one
|
||||
commandTimeout time.Duration // Timeout for the command execution
|
||||
registriesConfPath string // Path to the "registries.conf" file
|
||||
tmpDir string // Path to use for big temporary files
|
||||
}
|
||||
|
||||
// createApp returns a cli.App, and the underlying globalOptions object, to be run or tested.
|
||||
func createApp() (*cli.App, *globalOptions) {
|
||||
// requireSubcommand returns an error if no sub command is provided
|
||||
// This was copied from podman: `github.com/containers/podman/cmd/podman/validate/args.go`
|
||||
// Some small style changes to match skopeo were applied, but try to apply any
|
||||
// bugfixes there first.
|
||||
func requireSubcommand(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
suggestions := cmd.SuggestionsFor(args[0])
|
||||
if len(suggestions) == 0 {
|
||||
return fmt.Errorf("Unrecognized command `%[1]s %[2]s`\nTry '%[1]s --help' for more information", cmd.CommandPath(), args[0])
|
||||
}
|
||||
return fmt.Errorf("Unrecognized command `%[1]s %[2]s`\n\nDid you mean this?\n\t%[3]s\n\nTry '%[1]s --help' for more information", cmd.CommandPath(), args[0], strings.Join(suggestions, "\n\t"))
|
||||
}
|
||||
return fmt.Errorf("Missing command '%[1]s COMMAND'\nTry '%[1]s --help' for more information", cmd.CommandPath())
|
||||
}
|
||||
|
||||
// createApp returns a cobra.Command, and the underlying globalOptions object, to be run or tested.
|
||||
func createApp() (*cobra.Command, *globalOptions) {
|
||||
opts := globalOptions{}
|
||||
|
||||
app := cli.NewApp()
|
||||
app.EnableBashCompletion = true
|
||||
app.Name = "skopeo"
|
||||
rootCommand := &cobra.Command{
|
||||
Use: "skopeo",
|
||||
Long: "Various operations with container images and container image registries",
|
||||
RunE: requireSubcommand,
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
return opts.before(cmd)
|
||||
},
|
||||
SilenceUsage: true,
|
||||
SilenceErrors: true,
|
||||
// Hide the completion command which is provided by cobra
|
||||
CompletionOptions: cobra.CompletionOptions{HiddenDefaultCmd: true},
|
||||
// This is documented to parse "local" (non-PersistentFlags) flags of parent commands before
|
||||
// running subcommands and handling their options. We don't really run into such cases,
|
||||
// because all of our flags on rootCommand are in PersistentFlags, except for the deprecated --tls-verify;
|
||||
// in that case we need TraverseChildren so that we can distinguish between
|
||||
// (skopeo --tls-verify inspect) (causes a warning) and (skopeo inspect --tls-verify) (no warning).
|
||||
TraverseChildren: true,
|
||||
}
|
||||
if gitCommit != "" {
|
||||
app.Version = fmt.Sprintf("%s commit: %s", version.Version, gitCommit)
|
||||
rootCommand.Version = fmt.Sprintf("%s commit: %s", version.Version, gitCommit)
|
||||
} else {
|
||||
app.Version = version.Version
|
||||
rootCommand.Version = version.Version
|
||||
}
|
||||
app.Usage = "Various operations with container images and container image registries"
|
||||
app.Flags = []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "enable debug output",
|
||||
Destination: &opts.debug,
|
||||
},
|
||||
cli.GenericFlag{
|
||||
Name: "tls-verify",
|
||||
Usage: "require HTTPS and verify certificates when talking to container registries (defaults to true)",
|
||||
Hidden: true,
|
||||
Value: newOptionalBoolValue(&opts.tlsVerify),
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "policy",
|
||||
Usage: "Path to a trust policy file",
|
||||
Destination: &opts.policyPath,
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "insecure-policy",
|
||||
Usage: "run the tool without any policy check",
|
||||
Destination: &opts.insecurePolicy,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "registries.d",
|
||||
Usage: "use registry configuration files in `DIR` (e.g. for container signature storage)",
|
||||
Destination: &opts.registriesDirPath,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "override-arch",
|
||||
Usage: "use `ARCH` instead of the architecture of the machine for choosing images",
|
||||
Destination: &opts.overrideArch,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "override-os",
|
||||
Usage: "use `OS` instead of the running OS for choosing images",
|
||||
Destination: &opts.overrideOS,
|
||||
},
|
||||
cli.DurationFlag{
|
||||
Name: "command-timeout",
|
||||
Usage: "timeout for the command execution",
|
||||
Destination: &opts.commandTimeout,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "registries-conf",
|
||||
Usage: "path to the registries.conf file",
|
||||
Destination: &opts.registriesConfPath,
|
||||
Hidden: true,
|
||||
},
|
||||
// Override default `--version` global flag to enable `-v` shorthand
|
||||
var dummyVersion bool
|
||||
rootCommand.Flags().BoolVarP(&dummyVersion, "version", "v", false, "Version for Skopeo")
|
||||
rootCommand.PersistentFlags().BoolVar(&opts.debug, "debug", false, "enable debug output")
|
||||
rootCommand.PersistentFlags().StringVar(&opts.policyPath, "policy", "", "Path to a trust policy file")
|
||||
rootCommand.PersistentFlags().BoolVar(&opts.insecurePolicy, "insecure-policy", false, "run the tool without any policy check")
|
||||
rootCommand.PersistentFlags().StringVar(&opts.registriesDirPath, "registries.d", "", "use registry configuration files in `DIR` (e.g. for container signature storage)")
|
||||
rootCommand.PersistentFlags().StringVar(&opts.overrideArch, "override-arch", "", "use `ARCH` instead of the architecture of the machine for choosing images")
|
||||
rootCommand.PersistentFlags().StringVar(&opts.overrideOS, "override-os", "", "use `OS` instead of the running OS for choosing images")
|
||||
rootCommand.PersistentFlags().StringVar(&opts.overrideVariant, "override-variant", "", "use `VARIANT` instead of the running architecture variant for choosing images")
|
||||
rootCommand.PersistentFlags().DurationVar(&opts.commandTimeout, "command-timeout", 0, "timeout for the command execution")
|
||||
rootCommand.PersistentFlags().StringVar(&opts.registriesConfPath, "registries-conf", "", "path to the registries.conf file")
|
||||
if err := rootCommand.PersistentFlags().MarkHidden("registries-conf"); err != nil {
|
||||
logrus.Fatal("unable to mark registries-conf flag as hidden")
|
||||
}
|
||||
app.Before = opts.before
|
||||
app.Commands = []cli.Command{
|
||||
rootCommand.PersistentFlags().StringVar(&opts.tmpDir, "tmpdir", "", "directory used to store temporary files")
|
||||
flag := commonFlag.OptionalBoolFlag(rootCommand.Flags(), &opts.tlsVerify, "tls-verify", "Require HTTPS and verify certificates when accessing the registry")
|
||||
flag.Hidden = true
|
||||
rootCommand.AddCommand(
|
||||
copyCmd(&opts),
|
||||
deleteCmd(&opts),
|
||||
inspectCmd(&opts),
|
||||
layersCmd(&opts),
|
||||
deleteCmd(&opts),
|
||||
loginCmd(&opts),
|
||||
logoutCmd(&opts),
|
||||
manifestDigestCmd(),
|
||||
proxyCmd(&opts),
|
||||
syncCmd(&opts),
|
||||
standaloneSignCmd(),
|
||||
standaloneVerifyCmd(),
|
||||
tagsCmd(&opts),
|
||||
untrustedSignatureDumpCmd(),
|
||||
}
|
||||
return app, &opts
|
||||
)
|
||||
return rootCommand, &opts
|
||||
}
|
||||
|
||||
// before is run by the cli package for any command, before running the command-specific handler.
|
||||
func (opts *globalOptions) before(ctx *cli.Context) error {
|
||||
func (opts *globalOptions) before(cmd *cobra.Command) error {
|
||||
if opts.debug {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
}
|
||||
if opts.tlsVerify.present {
|
||||
if opts.tlsVerify.Present() {
|
||||
logrus.Warn("'--tls-verify' is deprecated, please set this on the specific subcommand")
|
||||
}
|
||||
return nil
|
||||
@@ -120,8 +128,8 @@ func main() {
|
||||
if reexec.Init() {
|
||||
return
|
||||
}
|
||||
app, _ := createApp()
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
rootCmd, _ := createApp()
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -153,3 +161,22 @@ func (opts *globalOptions) commandTimeoutContext() (context.Context, context.Can
|
||||
}
|
||||
return ctx, cancel
|
||||
}
|
||||
|
||||
// newSystemContext returns a *types.SystemContext corresponding to opts.
|
||||
// It is guaranteed to return a fresh instance, so it is safe to make additional updates to it.
|
||||
func (opts *globalOptions) newSystemContext() *types.SystemContext {
|
||||
ctx := &types.SystemContext{
|
||||
RegistriesDirPath: opts.registriesDirPath,
|
||||
ArchitectureChoice: opts.overrideArch,
|
||||
OSChoice: opts.overrideOS,
|
||||
VariantChoice: opts.overrideVariant,
|
||||
SystemRegistriesConfPath: opts.registriesConfPath,
|
||||
BigFilesTemporaryDir: opts.tmpDir,
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
}
|
||||
// DEPRECATED: We support this for backward compatibility, but override it if a per-image flag is provided.
|
||||
if opts.tlsVerify.Present() {
|
||||
ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.Value())
|
||||
}
|
||||
return ctx
|
||||
}
|
||||
|
||||
@@ -1,14 +1,51 @@
|
||||
package main
|
||||
|
||||
import "bytes"
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// runSkopeo creates an app object and runs it with args, with an implied first "skopeo".
|
||||
// Returns output intended for stdout and the returned error, if any.
|
||||
func runSkopeo(args ...string) (string, error) {
|
||||
app, _ := createApp()
|
||||
stdout := bytes.Buffer{}
|
||||
app.Writer = &stdout
|
||||
args = append([]string{"skopeo"}, args...)
|
||||
err := app.Run(args)
|
||||
app.SetOut(&stdout)
|
||||
app.SetArgs(args)
|
||||
err := app.Execute()
|
||||
return stdout.String(), err
|
||||
}
|
||||
|
||||
func TestGlobalOptionsNewSystemContext(t *testing.T) {
|
||||
// Default state
|
||||
opts, _ := fakeGlobalOptions(t, []string{})
|
||||
res := opts.newSystemContext()
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
// User-Agent is set by default.
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
}, res)
|
||||
// Set everything to non-default values.
|
||||
opts, _ = fakeGlobalOptions(t, []string{
|
||||
"--registries.d", "/srv/registries.d",
|
||||
"--override-arch", "overridden-arch",
|
||||
"--override-os", "overridden-os",
|
||||
"--override-variant", "overridden-variant",
|
||||
"--tmpdir", "/srv",
|
||||
"--registries-conf", "/srv/registries.conf",
|
||||
"--tls-verify=false",
|
||||
})
|
||||
res = opts.newSystemContext()
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
RegistriesDirPath: "/srv/registries.d",
|
||||
ArchitectureChoice: "overridden-arch",
|
||||
OSChoice: "overridden-os",
|
||||
VariantChoice: "overridden-variant",
|
||||
BigFilesTemporaryDir: "/srv",
|
||||
SystemRegistriesConfPath: "/srv/registries.conf",
|
||||
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
}, res)
|
||||
}
|
||||
|
||||
@@ -4,23 +4,25 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/containers/image/manifest"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type manifestDigestOptions struct {
|
||||
}
|
||||
|
||||
func manifestDigestCmd() cli.Command {
|
||||
opts := manifestDigestOptions{}
|
||||
return cli.Command{
|
||||
Name: "manifest-digest",
|
||||
Usage: "Compute a manifest digest of a file",
|
||||
ArgsUsage: "MANIFEST",
|
||||
Action: commandAction(opts.run),
|
||||
func manifestDigestCmd() *cobra.Command {
|
||||
var opts manifestDigestOptions
|
||||
cmd := &cobra.Command{
|
||||
Use: "manifest-digest MANIFEST-FILE",
|
||||
Short: "Compute a manifest digest of a file",
|
||||
RunE: commandAction(opts.run),
|
||||
Example: "skopeo manifest-digest manifest.json",
|
||||
}
|
||||
adjustUsage(cmd)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (opts *manifestDigestOptions) run(args []string, stdout io.Writer) error {
|
||||
@@ -29,7 +31,7 @@ func (opts *manifestDigestOptions) run(args []string, stdout io.Writer) error {
|
||||
}
|
||||
manifestPath := args[0]
|
||||
|
||||
man, err := ioutil.ReadFile(manifestPath)
|
||||
man, err := os.ReadFile(manifestPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading manifest from %s: %v", manifestPath, err)
|
||||
}
|
||||
|
||||
@@ -17,8 +17,8 @@ func TestManifestDigest(t *testing.T) {
|
||||
}
|
||||
|
||||
// Error reading manifest
|
||||
out, err := runSkopeo("manifest-digest", "/this/doesnt/exist")
|
||||
assertTestFailed(t, out, err, "/this/doesnt/exist")
|
||||
out, err := runSkopeo("manifest-digest", "/this/does/not/exist")
|
||||
assertTestFailed(t, out, err, "/this/does/not/exist")
|
||||
|
||||
// Error computing manifest
|
||||
out, err = runSkopeo("manifest-digest", "fixtures/v2s1-invalid-signatures.manifest.json")
|
||||
|
||||
738  cmd/skopeo/proxy.go  Normal file
@@ -0,0 +1,738 @@
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package main
|
||||
|
||||
/*
|
||||
This code is currently only intended to be used by ostree
|
||||
to fetch content via containers. The API is subject
|
||||
to change. A goal however is to stabilize the API
|
||||
eventually as a full out-of-process interface to the
|
||||
core containers/image library functionality.
|
||||
|
||||
To use this command, in a parent process create a
|
||||
`socketpair()` of type `SOCK_SEQPACKET`. Fork
|
||||
off this command, and pass one half of the socket
|
||||
pair to the child. Providing it on stdin (fd 0)
|
||||
is the expected default.
|
||||
|
||||
The protocol is JSON for the control layer,
|
||||
and a read side of a `pipe()` passed for large data.
|
||||
|
||||
Base JSON protocol:
|
||||
|
||||
request: { method: "MethodName": args: [arguments] }
|
||||
reply: { success: bool, value: JSVAL, pipeid: number, error: string }
|
||||
|
||||
For any non-metadata i.e. payload data from `GetManifest`
|
||||
and `GetBlob` the server will pass back the read half of a `pipe(2)` via FD passing,
|
||||
along with a `pipeid` integer.
|
||||
|
||||
The expected flow looks like this:
|
||||
|
||||
- Initialize
|
||||
And validate the returned protocol version versus
|
||||
what your client supports.
|
||||
- OpenImage docker://quay.io/someorg/example:latest
|
||||
(returns an imageid)
|
||||
- GetManifest imageid (and associated <pipeid>)
|
||||
(Streaming read data from pipe)
|
||||
- FinishPipe <pipeid>
|
||||
- GetBlob imageid sha256:...
|
||||
(Streaming read data from pipe)
|
||||
- FinishPipe <pipeid>
|
||||
- GetBlob imageid sha256:...
|
||||
(Streaming read data from pipe)
|
||||
- FinishPipe <pipeid>
|
||||
- CloseImage imageid
|
||||
|
||||
You may interleave invocations of these methods, e.g. one
|
||||
can also invoke `OpenImage` multiple times, as well as
|
||||
starting multiple GetBlob requests before calling `FinishPipe`
|
||||
on them. The server will stream data into the pipefd
|
||||
until `FinishPipe` is invoked.
|
||||
|
||||
Note that the pipe will not be closed by the server until
|
||||
the client has invoked `FinishPipe`. This is to ensure
|
||||
that the client checks for errors. For example, `GetBlob`
|
||||
performs digest (e.g. sha256) verification and this must
|
||||
be checked after all data has been written.
|
||||
*/
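As a concrete illustration of the control-layer framing described above, the following standalone sketch (not part of proxy.go; the struct names are invented here, and the SOCK_SEQPACKET socketpair, fd passing for pipes, and error handling are deliberately omitted) shows what an Initialize round trip looks like as JSON:

package main

import (
	"encoding/json"
	"fmt"
)

// proxyRequest mirrors the request framing: { "method": ..., "args": [...] }.
type proxyRequest struct {
	Method string        `json:"method"`
	Args   []interface{} `json:"args"`
}

// proxyReply mirrors the reply framing: { "success": ..., "value": ..., "pipeid": ..., "error": ... }.
type proxyReply struct {
	Success bool        `json:"success"`
	Value   interface{} `json:"value"`
	PipeID  uint32      `json:"pipeid"`
	Error   string      `json:"error"`
}

func main() {
	// Step 1 of the flow: Initialize, then validate the returned protocol version.
	req, _ := json.Marshal(proxyRequest{Method: "Initialize", Args: []interface{}{}})
	fmt.Println(string(req)) // {"method":"Initialize","args":[]}

	// A successful reply carries the protocol version (0.2.3 below) in "value".
	var rep proxyReply
	_ = json.Unmarshal([]byte(`{"success":true,"value":"0.2.3","pipeid":0,"error":""}`), &rep)
	fmt.Println(rep.Success, rep.Value) // true 0.2.3
}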
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/containers/image/v5/image"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/pkg/blobinfocache"
|
||||
"github.com/containers/image/v5/transports/alltransports"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// protocolVersion is semantic version of the protocol used by this proxy.
|
||||
// The first version of the protocol has major version 0.2 to signify a
|
||||
// departure from the original code which used HTTP.
|
||||
//
|
||||
// 0.2.1: Initial version
|
||||
// 0.2.2: Added support for fetching image configuration as OCI
|
||||
// 0.2.3: Added GetFullConfig
|
||||
const protocolVersion = "0.2.3"
|
||||
|
||||
// maxMsgSize is the current limit on a packet size.
|
||||
// Note that all non-metadata (i.e. payload data) is sent over a pipe.
|
||||
const maxMsgSize = 32 * 1024
|
||||
|
||||
// maxJSONFloat is ECMA Number.MAX_SAFE_INTEGER
|
||||
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER
|
||||
// We hard error if the input JSON numbers we expect to be
|
||||
// integers are above this.
|
||||
const maxJSONFloat = float64(uint64(1)<<53 - 1)
|
||||
|
||||
// request is the JSON serialization of a function call
|
||||
type request struct {
|
||||
// Method is the name of the function
|
||||
Method string `json:"method"`
|
||||
// Args is the arguments (parsed inside the function)
|
||||
Args []interface{} `json:"args"`
|
||||
}
|
||||
|
||||
// reply is serialized to JSON as the return value from a function call.
|
||||
type reply struct {
|
||||
// Success is true if and only if the call succeeded.
|
||||
Success bool `json:"success"`
|
||||
// Value is an arbitrary value (or values, as array/map) returned from the call.
|
||||
Value interface{} `json:"value"`
|
||||
// PipeID is an index into open pipes, and should be passed to FinishPipe
|
||||
PipeID uint32 `json:"pipeid"`
|
||||
// Error should be non-empty if Success == false
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
// replyBuf is our internal deserialization of reply plus optional fd
|
||||
type replyBuf struct {
|
||||
// value will be converted to a reply Value
|
||||
value interface{}
|
||||
// fd is the read half of a pipe, passed back to the client
|
||||
fd *os.File
|
||||
// pipeid will be provided to the client as PipeID, an index into our open pipes
|
||||
pipeid uint32
|
||||
}
|
||||
|
||||
// activePipe is an open pipe to the client.
|
||||
// It contains an error value
|
||||
type activePipe struct {
|
||||
// w is the write half of the pipe
|
||||
w *os.File
|
||||
// wg is completed when our worker goroutine is done
|
||||
wg sync.WaitGroup
|
||||
// err may be set in our worker goroutine
|
||||
err error
|
||||
}
|
||||
|
||||
// openImage is an opened image reference
|
||||
type openImage struct {
|
||||
// id is an opaque integer handle
|
||||
id uint32
|
||||
src types.ImageSource
|
||||
cachedimg types.Image
|
||||
}
|
||||
|
||||
// proxyHandler is the state associated with our socket.
|
||||
type proxyHandler struct {
|
||||
// lock protects everything else in this structure.
|
||||
lock sync.Mutex
|
||||
// opts is CLI options
|
||||
opts *proxyOptions
|
||||
sysctx *types.SystemContext
|
||||
cache types.BlobInfoCache
|
||||
|
||||
// imageSerial is a counter for open images
|
||||
imageSerial uint32
|
||||
// images holds our opened images
|
||||
images map[uint32]*openImage
|
||||
// activePipes maps from "pipeid" to a pipe + goroutine pair
|
||||
activePipes map[uint32]*activePipe
|
||||
}
|
||||
|
||||
// Initialize performs one-time initialization, and returns the protocol version
|
||||
func (h *proxyHandler) Initialize(args []interface{}) (replyBuf, error) {
|
||||
h.lock.Lock()
|
||||
defer h.lock.Unlock()
|
||||
|
||||
var ret replyBuf
|
||||
|
||||
if len(args) != 0 {
|
||||
return ret, fmt.Errorf("invalid request, expecting zero arguments")
|
||||
}
|
||||
|
||||
if h.sysctx != nil {
|
||||
return ret, fmt.Errorf("already initialized")
|
||||
}
|
||||
|
||||
sysctx, err := h.opts.imageOpts.newSystemContext()
|
||||
if err != nil {
|
||||
return ret, err
|
||||
}
|
||||
h.sysctx = sysctx
|
||||
h.cache = blobinfocache.DefaultCache(sysctx)
|
||||
|
||||
r := replyBuf{
|
||||
value: protocolVersion,
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// OpenImage accepts a string image reference i.e. TRANSPORT:REF - like `skopeo copy`.
// The return value is an opaque integer handle.
func (h *proxyHandler) OpenImage(args []interface{}) (replyBuf, error) {
	h.lock.Lock()
	defer h.lock.Unlock()
	var ret replyBuf

	if h.sysctx == nil {
		return ret, fmt.Errorf("client error: must invoke Initialize")
	}
	if len(args) != 1 {
		return ret, fmt.Errorf("invalid request, expecting one argument")
	}
	imageref, ok := args[0].(string)
	if !ok {
		return ret, fmt.Errorf("expecting string imageref, not %T", args[0])
	}

	imgRef, err := alltransports.ParseImageName(imageref)
	if err != nil {
		return ret, err
	}
	imgsrc, err := imgRef.NewImageSource(context.Background(), h.sysctx)
	if err != nil {
		return ret, err
	}

	h.imageSerial++
	openimg := &openImage{
		id:  h.imageSerial,
		src: imgsrc,
	}
	h.images[openimg.id] = openimg
	ret.value = openimg.id

	return ret, nil
}

func (h *proxyHandler) CloseImage(args []interface{}) (replyBuf, error) {
	h.lock.Lock()
	defer h.lock.Unlock()
	var ret replyBuf

	if h.sysctx == nil {
		return ret, fmt.Errorf("client error: must invoke Initialize")
	}
	if len(args) != 1 {
		return ret, fmt.Errorf("invalid request, expecting one argument")
	}
	imgref, err := h.parseImageFromID(args[0])
	if err != nil {
		return ret, err
	}
	imgref.src.Close()
	delete(h.images, imgref.id)

	return ret, nil
}

func parseImageID(v interface{}) (uint32, error) {
	imgidf, ok := v.(float64)
	if !ok {
		return 0, fmt.Errorf("expecting integer imageid, not %T", v)
	}
	return uint32(imgidf), nil
}

// parseUint64 validates that a number fits inside a JavaScript safe integer
func parseUint64(v interface{}) (uint64, error) {
	f, ok := v.(float64)
	if !ok {
		return 0, fmt.Errorf("expecting numeric, not %T", v)
	}
	if f > maxJSONFloat {
		return 0, fmt.Errorf("out of range integer for numeric %f", f)
	}
	return uint64(f), nil
}

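A side note on why parseUint64 goes through float64 at all: encoding/json decodes every untyped JSON number into a float64, which can only represent integers exactly up to 2^53. The snippet below is an illustrative, self-contained demonstration of that rounding; the exact value of maxJSONFloat is not shown in this hunk and is assumed to be on the order of 2^53-1.

package main

import "fmt"

// Demonstrates why sizes arriving as JSON numbers are bounds-checked:
// integers above 2^53 are silently rounded once they pass through float64.
func main() {
	exact := uint64(1)<<53 - 1  // 9007199254740991, exactly representable
	tooBig := uint64(1)<<53 + 1 // 9007199254740993, not representable
	fmt.Println(uint64(float64(exact)) == exact)   // true
	fmt.Println(uint64(float64(tooBig)) == tooBig) // false: rounded to 2^53
}
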
func (h *proxyHandler) parseImageFromID(v interface{}) (*openImage, error) {
	imgid, err := parseImageID(v)
	if err != nil {
		return nil, err
	}
	imgref, ok := h.images[imgid]
	if !ok {
		return nil, fmt.Errorf("no image %v", imgid)
	}
	return imgref, nil
}

func (h *proxyHandler) allocPipe() (*os.File, *activePipe, error) {
	piper, pipew, err := os.Pipe()
	if err != nil {
		return nil, nil, err
	}
	f := activePipe{
		w: pipew,
	}
	h.activePipes[uint32(pipew.Fd())] = &f
	f.wg.Add(1)
	return piper, &f, nil
}

// returnBytes generates a return pipe() from a byte array
// In the future it might be nicer to return this via memfd_create()
func (h *proxyHandler) returnBytes(retval interface{}, buf []byte) (replyBuf, error) {
	var ret replyBuf
	piper, f, err := h.allocPipe()
	if err != nil {
		return ret, err
	}

	go func() {
		// Signal completion when we return
		defer f.wg.Done()
		_, err = io.Copy(f.w, bytes.NewReader(buf))
		if err != nil {
			f.err = err
		}
	}()

	ret.value = retval
	ret.fd = piper
	ret.pipeid = uint32(f.w.Fd())
	return ret, nil
}

// cacheTargetManifest is invoked when GetManifest or GetConfig is invoked
// the first time for a given image. If the requested image is a manifest
// list, this function resolves it to the image matching the calling process'
// operating system and architecture.
//
// TODO: Add GetRawManifest or so that exposes manifest lists
func (h *proxyHandler) cacheTargetManifest(img *openImage) error {
	ctx := context.Background()
	if img.cachedimg != nil {
		return nil
	}
	unparsedToplevel := image.UnparsedInstance(img.src, nil)
	mfest, manifestType, err := unparsedToplevel.Manifest(ctx)
	if err != nil {
		return err
	}
	var target *image.UnparsedImage
	if manifest.MIMETypeIsMultiImage(manifestType) {
		manifestList, err := manifest.ListFromBlob(mfest, manifestType)
		if err != nil {
			return err
		}
		instanceDigest, err := manifestList.ChooseInstance(h.sysctx)
		if err != nil {
			return err
		}
		target = image.UnparsedInstance(img.src, &instanceDigest)
	} else {
		target = unparsedToplevel
	}
	cachedimg, err := image.FromUnparsedImage(ctx, h.sysctx, target)
	if err != nil {
		return err
	}
	img.cachedimg = cachedimg
	return nil
}

// GetManifest returns a copy of the manifest, converted to OCI format, along with the original digest.
// Manifest lists are resolved to the current operating system and architecture.
func (h *proxyHandler) GetManifest(args []interface{}) (replyBuf, error) {
	h.lock.Lock()
	defer h.lock.Unlock()

	var ret replyBuf

	if h.sysctx == nil {
		return ret, fmt.Errorf("client error: must invoke Initialize")
	}
	if len(args) != 1 {
		return ret, fmt.Errorf("invalid request, expecting one argument")
	}
	imgref, err := h.parseImageFromID(args[0])
	if err != nil {
		return ret, err
	}

	err = h.cacheTargetManifest(imgref)
	if err != nil {
		return ret, err
	}
	img := imgref.cachedimg

	ctx := context.Background()
	rawManifest, manifestType, err := img.Manifest(ctx)
	if err != nil {
		return ret, err
	}

	// We only support OCI and docker2schema2. We know docker2schema2 can be easily+cheaply
	// converted into OCI, so consumers only need to see OCI.
	switch manifestType {
	case imgspecv1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType:
		break
	// Explicitly reject e.g. docker schema 1 type with a "legacy" note
	case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
		return ret, fmt.Errorf("unsupported legacy manifest MIME type: %s", manifestType)
	default:
		return ret, fmt.Errorf("unsupported manifest MIME type: %s", manifestType)
	}

	// We always return the original digest, as that's what clients need to do pull-by-digest
	// and in general identify the image.
	digest, err := manifest.Digest(rawManifest)
	if err != nil {
		return ret, err
	}
	var serialized []byte
	// But, we convert to OCI format on the wire if it's not already. The idea here is that by reusing the containers/image
	// stack, clients to this proxy can pretend the world is OCI only, and not need to care about e.g.
	// docker schema and MIME types.
	if manifestType != imgspecv1.MediaTypeImageManifest {
		manifestUpdates := types.ManifestUpdateOptions{ManifestMIMEType: imgspecv1.MediaTypeImageManifest}
		ociImage, err := img.UpdatedImage(ctx, manifestUpdates)
		if err != nil {
			return ret, err
		}

		ociSerialized, _, err := ociImage.Manifest(ctx)
		if err != nil {
			return ret, err
		}
		serialized = ociSerialized
	} else {
		serialized = rawManifest
	}
	return h.returnBytes(digest, serialized)
}

// GetFullConfig returns a copy of the image configuration, converted to OCI format.
// https://github.com/opencontainers/image-spec/blob/main/config.md
func (h *proxyHandler) GetFullConfig(args []interface{}) (replyBuf, error) {
	h.lock.Lock()
	defer h.lock.Unlock()

	var ret replyBuf

	if h.sysctx == nil {
		return ret, fmt.Errorf("client error: must invoke Initialize")
	}
	if len(args) != 1 {
		return ret, fmt.Errorf("invalid request, expecting: [imgid]")
	}
	imgref, err := h.parseImageFromID(args[0])
	if err != nil {
		return ret, err
	}
	err = h.cacheTargetManifest(imgref)
	if err != nil {
		return ret, err
	}
	img := imgref.cachedimg

	ctx := context.TODO()
	config, err := img.OCIConfig(ctx)
	if err != nil {
		return ret, err
	}
	serialized, err := json.Marshal(&config)
	if err != nil {
		return ret, err
	}
	return h.returnBytes(nil, serialized)
}

// GetConfig returns a copy of the container runtime configuration, converted to OCI format.
// Note that due to a historical mistake, this returns not the full image configuration,
// but just the container runtime configuration. You should use GetFullConfig instead.
func (h *proxyHandler) GetConfig(args []interface{}) (replyBuf, error) {
	h.lock.Lock()
	defer h.lock.Unlock()

	var ret replyBuf

	if h.sysctx == nil {
		return ret, fmt.Errorf("client error: must invoke Initialize")
	}
	if len(args) != 1 {
		return ret, fmt.Errorf("invalid request, expecting: [imgid]")
	}
	imgref, err := h.parseImageFromID(args[0])
	if err != nil {
		return ret, err
	}
	err = h.cacheTargetManifest(imgref)
	if err != nil {
		return ret, err
	}
	img := imgref.cachedimg

	ctx := context.TODO()
	config, err := img.OCIConfig(ctx)
	if err != nil {
		return ret, err
	}
	serialized, err := json.Marshal(&config.Config)
	if err != nil {
		return ret, err
	}
	return h.returnBytes(nil, serialized)
}

// GetBlob fetches a blob, performing digest verification.
func (h *proxyHandler) GetBlob(args []interface{}) (replyBuf, error) {
	h.lock.Lock()
	defer h.lock.Unlock()

	var ret replyBuf

	if h.sysctx == nil {
		return ret, fmt.Errorf("client error: must invoke Initialize")
	}
	if len(args) != 3 {
		return ret, fmt.Errorf("found %d args, expecting (imgid, digest, size)", len(args))
	}
	imgref, err := h.parseImageFromID(args[0])
	if err != nil {
		return ret, err
	}
	digestStr, ok := args[1].(string)
	if !ok {
		return ret, fmt.Errorf("expecting string blobid")
	}
	size, err := parseUint64(args[2])
	if err != nil {
		return ret, err
	}

	ctx := context.TODO()
	d, err := digest.Parse(digestStr)
	if err != nil {
		return ret, err
	}
	blobr, blobSize, err := imgref.src.GetBlob(ctx, types.BlobInfo{Digest: d, Size: int64(size)}, h.cache)
	if err != nil {
		return ret, err
	}

	piper, f, err := h.allocPipe()
	if err != nil {
		return ret, err
	}
	go func() {
		// Signal completion when we return
		defer f.wg.Done()
		verifier := d.Verifier()
		tr := io.TeeReader(blobr, verifier)
		n, err := io.Copy(f.w, tr)
		if err != nil {
			f.err = err
			return
		}
		if n != int64(size) {
			f.err = fmt.Errorf("expected %d bytes in blob, got %d", size, n)
		}
		if !verifier.Verified() {
			f.err = fmt.Errorf("corrupted blob, expecting %s", d.String())
		}
	}()

	ret.value = blobSize
	ret.fd = piper
	ret.pipeid = uint32(f.w.Fd())
	return ret, nil
}

// FinishPipe waits for the worker goroutine to finish, and closes the write side of the pipe.
func (h *proxyHandler) FinishPipe(args []interface{}) (replyBuf, error) {
	h.lock.Lock()
	defer h.lock.Unlock()

	var ret replyBuf

	pipeidv, err := parseUint64(args[0])
	if err != nil {
		return ret, err
	}
	pipeid := uint32(pipeidv)

	f, ok := h.activePipes[pipeid]
	if !ok {
		return ret, fmt.Errorf("finishpipe: no active pipe %d", pipeid)
	}

	// Wait for the goroutine to complete
	f.wg.Wait()
	// And only now do we close the write half; this forces the client to call this API
	f.w.Close()
	// Propagate any errors from the goroutine worker
	err = f.err
	delete(h.activePipes, pipeid)
	return ret, err
}

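Taken together with GetBlob and returnBytes above, the contract for data-carrying replies is: drain the returned pipe first, then call FinishPipe with the advertised PipeID; only the FinishPipe reply surfaces worker errors such as a short copy or a digest mismatch. A hedged client-side sketch of that ordering; the roundTrip helper is hypothetical, standing in for one JSON request/reply exchange on the proxy socket.

package proxyclient

import (
	"io"
	"os"
)

// roundTrip is a hypothetical helper: it sends one request and returns the
// reply value, an optional pipe fd, and the PipeID to pass to FinishPipe.
type roundTrip func(method string, args ...interface{}) (value interface{}, pipe *os.File, pipeID uint32, err error)

// fetchBlob shows the required ordering: read the pipe to EOF, then FinishPipe.
func fetchBlob(call roundTrip, imgID uint32, digest string, size uint64) ([]byte, error) {
	_, pipe, pipeID, err := call("GetBlob", imgID, digest, size)
	if err != nil {
		return nil, err
	}
	defer pipe.Close()
	blob, readErr := io.ReadAll(pipe)                // drain the data first
	_, _, _, finishErr := call("FinishPipe", pipeID) // then collect worker errors
	if readErr != nil {
		return nil, readErr
	}
	if finishErr != nil {
		return nil, finishErr // e.g. a digest-verification failure
	}
	return blob, nil
}
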
// send writes a reply buffer to the socket
func (buf replyBuf) send(conn *net.UnixConn, err error) error {
	replyToSerialize := reply{
		Success: err == nil,
		Value:   buf.value,
		PipeID:  buf.pipeid,
	}
	if err != nil {
		replyToSerialize.Error = err.Error()
	}
	serializedReply, err := json.Marshal(&replyToSerialize)
	if err != nil {
		return err
	}
	// We took ownership of the FD - close it when we're done.
	defer func() {
		if buf.fd != nil {
			buf.fd.Close()
		}
	}()
	// Copy the FD number to the socket ancillary buffer
	fds := make([]int, 0)
	if buf.fd != nil {
		fds = append(fds, int(buf.fd.Fd()))
	}
	oob := syscall.UnixRights(fds...)
	n, oobn, err := conn.WriteMsgUnix(serializedReply, oob, nil)
	if err != nil {
		return err
	}
	// Validate that we sent the full packet
	if n != len(serializedReply) || oobn != len(oob) {
		return io.ErrShortWrite
	}
	return nil
}

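send has a mirror image on the client side: the reply JSON arrives in the datagram payload, and the pipe, when present, arrives as an SCM_RIGHTS descriptor in the ancillary data. A minimal sketch of that read path, assuming the same one-reply-per-datagram framing used by send; buffer sizes and error handling are simplified.

package proxyclient

import (
	"net"
	"os"
	"syscall"
)

// readReply reads one reply datagram plus an optional passed fd.
func readReply(conn *net.UnixConn) (payload []byte, pipe *os.File, err error) {
	buf := make([]byte, 32*1024)
	oob := make([]byte, syscall.CmsgSpace(4)) // room for a single fd
	n, oobn, _, _, err := conn.ReadMsgUnix(buf, oob)
	if err != nil {
		return nil, nil, err
	}
	if oobn > 0 {
		scms, err := syscall.ParseSocketControlMessage(oob[:oobn])
		if err != nil {
			return nil, nil, err
		}
		if len(scms) == 1 {
			fds, err := syscall.ParseUnixRights(&scms[0])
			if err != nil {
				return nil, nil, err
			}
			if len(fds) == 1 {
				pipe = os.NewFile(uintptr(fds[0]), "proxy-pipe")
			}
		}
	}
	return buf[:n], pipe, nil
}
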
type proxyOptions struct {
	global    *globalOptions
	imageOpts *imageOptions
	sockFd    int
}

func proxyCmd(global *globalOptions) *cobra.Command {
	sharedFlags, sharedOpts := sharedImageFlags()
	imageFlags, imageOpts := imageFlags(global, sharedOpts, nil, "", "")
	opts := proxyOptions{global: global, imageOpts: imageOpts}
	cmd := &cobra.Command{
		Use:   "experimental-image-proxy [command options] IMAGE",
		Short: "Interactive proxy for fetching container images (EXPERIMENTAL)",
		Long:  `Run skopeo as a proxy, supporting HTTP requests to fetch manifests and blobs.`,
		RunE:  commandAction(opts.run),
		Args:  cobra.ExactArgs(0),
		// Not stabilized yet
		Hidden:  true,
		Example: `skopeo experimental-image-proxy --sockfd 3`,
	}
	adjustUsage(cmd)
	flags := cmd.Flags()
	flags.AddFlagSet(&sharedFlags)
	flags.AddFlagSet(&imageFlags)
	flags.IntVar(&opts.sockFd, "sockfd", 0, "Serve on opened socket pair (default 0/stdin)")
	return cmd
}

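The --sockfd flag only makes sense together with how a caller is expected to start the proxy: create an AF_UNIX socketpair, pass one end to the child process, and keep the other for the JSON conversation. A sketch under the assumption that skopeo is on PATH; a SOCK_SEQPACKET pair keeps the one-request-per-read framing that the run loop below relies on, and with ExtraFiles the first inherited file becomes fd 3 in the child, matching the Example above.

package proxyclient

import (
	"net"
	"os"
	"os/exec"
	"syscall"
)

// startProxy launches "skopeo experimental-image-proxy" on one half of a
// socketpair and returns the parent's half for sending requests.
func startProxy() (*net.UnixConn, *exec.Cmd, error) {
	fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_SEQPACKET, 0)
	if err != nil {
		return nil, nil, err
	}
	theirs := os.NewFile(uintptr(fds[0]), "proxy-child-sock")
	ours := os.NewFile(uintptr(fds[1]), "proxy-parent-sock")
	defer theirs.Close() // the child holds its own copy after Start

	cmd := exec.Command("skopeo", "experimental-image-proxy", "--sockfd", "3")
	cmd.ExtraFiles = []*os.File{theirs} // becomes fd 3 in the child
	cmd.Stderr = os.Stderr
	if err := cmd.Start(); err != nil {
		ours.Close()
		return nil, nil, err
	}

	fconn, err := net.FileConn(ours)
	ours.Close() // net.FileConn duplicated the descriptor
	if err != nil {
		return nil, nil, err
	}
	return fconn.(*net.UnixConn), cmd, nil
}
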
// processRequest dispatches a remote request.
// replyBuf is the result of the invocation.
// terminate should be true if processing of requests should halt.
func (h *proxyHandler) processRequest(readBytes []byte) (rb replyBuf, terminate bool, err error) {
	var req request

	// Parse the request JSON
	if err = json.Unmarshal(readBytes, &req); err != nil {
		err = fmt.Errorf("invalid request: %v", err)
		return
	}
	// Dispatch on the method
	switch req.Method {
	case "Initialize":
		rb, err = h.Initialize(req.Args)
	case "OpenImage":
		rb, err = h.OpenImage(req.Args)
	case "CloseImage":
		rb, err = h.CloseImage(req.Args)
	case "GetManifest":
		rb, err = h.GetManifest(req.Args)
	case "GetConfig":
		rb, err = h.GetConfig(req.Args)
	case "GetFullConfig":
		rb, err = h.GetFullConfig(req.Args)
	case "GetBlob":
		rb, err = h.GetBlob(req.Args)
	case "FinishPipe":
		rb, err = h.FinishPipe(req.Args)
	case "Shutdown":
		terminate = true
	default:
		err = fmt.Errorf("unknown method: %s", req.Method)
	}
	return
}

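For orientation, one request/reply exchange per datagram looks roughly like the transcript below. Field names are taken from the request and reply structs (with their JSON tags assumed to be the conventional lowercase ones where not visible in this hunk), and the handle and pipe numbers are invented, so treat this as illustrative rather than normative.

-> {"method": "OpenImage", "args": ["docker://quay.io/example/someimage:latest"]}
<- {"success": true, "value": 1, "pipeid": 0, "error": ""}

-> {"method": "GetManifest", "args": [1]}
<- {"success": true, "value": "sha256:...", "pipeid": 7, "error": ""}
   (the OCI manifest bytes arrive on a pipe fd passed via SCM_RIGHTS)

-> {"method": "FinishPipe", "args": [7]}
<- {"success": true, "value": null, "pipeid": 0, "error": ""}
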
// Implementation of podman experimental-image-proxy
func (opts *proxyOptions) run(args []string, stdout io.Writer) error {
	handler := &proxyHandler{
		opts:        opts,
		images:      make(map[uint32]*openImage),
		activePipes: make(map[uint32]*activePipe),
	}

	// Convert the socket FD passed by client into a net.FileConn
	fd := os.NewFile(uintptr(opts.sockFd), "sock")
	fconn, err := net.FileConn(fd)
	if err != nil {
		return err
	}
	conn := fconn.(*net.UnixConn)

	// Allocate a buffer to copy the packet into
	buf := make([]byte, maxMsgSize)
	for {
		n, _, err := conn.ReadFrom(buf)
		if err != nil {
			if errors.Is(err, io.EOF) {
				return nil
			}
			return fmt.Errorf("reading socket: %v", err)
		}
		readbuf := buf[0:n]

		rb, terminate, err := handler.processRequest(readbuf)
		if terminate {
			return nil
		}

		if err := rb.send(conn, err); err != nil {
			return fmt.Errorf("writing to socket: %w", err)
		}
	}
}

cmd/skopeo/proxy_windows.go (new file, 30 lines)
@@ -0,0 +1,30 @@
//go:build windows
// +build windows

package main

import (
	"fmt"
	"io"

	"github.com/spf13/cobra"
)

type proxyOptions struct {
	global *globalOptions
}

func proxyCmd(global *globalOptions) *cobra.Command {
	opts := proxyOptions{global: global}
	cmd := &cobra.Command{
		RunE: commandAction(opts.run),
		Args: cobra.ExactArgs(0),
		// Not stabilized yet
		Hidden: true,
	}
	return cmd
}

func (opts *proxyOptions) run(args []string, stdout io.Writer) error {
	return fmt.Errorf("This command is not supported on Windows")
}

@@ -5,31 +5,30 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/containers/image/signature"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/containers/image/v5/pkg/cli"
|
||||
"github.com/containers/image/v5/signature"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type standaloneSignOptions struct {
|
||||
output string // Output file path
|
||||
output string // Output file path
|
||||
passphraseFile string // Path pointing to a passphrase file when signing
|
||||
}
|
||||
|
||||
func standaloneSignCmd() cli.Command {
|
||||
func standaloneSignCmd() *cobra.Command {
|
||||
opts := standaloneSignOptions{}
|
||||
return cli.Command{
|
||||
Name: "standalone-sign",
|
||||
Usage: "Create a signature using local files",
|
||||
ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT",
|
||||
Action: commandAction(opts.run),
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "output, o",
|
||||
Usage: "output the signature to `SIGNATURE`",
|
||||
Destination: &opts.output,
|
||||
},
|
||||
},
|
||||
cmd := &cobra.Command{
|
||||
Use: "standalone-sign [command options] MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT --output|-o SIGNATURE",
|
||||
Short: "Create a signature using local files",
|
||||
RunE: commandAction(opts.run),
|
||||
}
|
||||
adjustUsage(cmd)
|
||||
flags := cmd.Flags()
|
||||
flags.StringVarP(&opts.output, "output", "o", "", "output the signature to `SIGNATURE`")
|
||||
flags.StringVarP(&opts.passphraseFile, "passphrase-file", "", "", "file that contains a passphrase for the --sign-by key")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (opts *standaloneSignOptions) run(args []string, stdout io.Writer) error {
|
||||
@@ -40,7 +39,7 @@ func (opts *standaloneSignOptions) run(args []string, stdout io.Writer) error {
|
||||
dockerReference := args[1]
|
||||
fingerprint := args[2]
|
||||
|
||||
manifest, err := ioutil.ReadFile(manifestPath)
|
||||
manifest, err := os.ReadFile(manifestPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading %s: %v", manifestPath, err)
|
||||
}
|
||||
@@ -50,12 +49,18 @@ func (opts *standaloneSignOptions) run(args []string, stdout io.Writer) error {
|
||||
return fmt.Errorf("Error initializing GPG: %v", err)
|
||||
}
|
||||
defer mech.Close()
|
||||
signature, err := signature.SignDockerManifest(manifest, dockerReference, mech, fingerprint)
|
||||
|
||||
passphrase, err := cli.ReadPassphraseFile(opts.passphraseFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
signature, err := signature.SignDockerManifestWithOptions(manifest, dockerReference, mech, fingerprint, &signature.SignOptions{Passphrase: passphrase})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating signature: %v", err)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(opts.output, signature, 0644); err != nil {
|
||||
if err := os.WriteFile(opts.output, signature, 0644); err != nil {
|
||||
return fmt.Errorf("Error writing signature to %s: %v", opts.output, err)
|
||||
}
|
||||
return nil
|
||||
@@ -64,14 +69,15 @@ func (opts *standaloneSignOptions) run(args []string, stdout io.Writer) error {
|
||||
type standaloneVerifyOptions struct {
|
||||
}
|
||||
|
||||
func standaloneVerifyCmd() cli.Command {
|
||||
func standaloneVerifyCmd() *cobra.Command {
|
||||
opts := standaloneVerifyOptions{}
|
||||
return cli.Command{
|
||||
Name: "standalone-verify",
|
||||
Usage: "Verify a signature using local files",
|
||||
ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT SIGNATURE",
|
||||
Action: commandAction(opts.run),
|
||||
cmd := &cobra.Command{
|
||||
Use: "standalone-verify MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT SIGNATURE",
|
||||
Short: "Verify a signature using local files",
|
||||
RunE: commandAction(opts.run),
|
||||
}
|
||||
adjustUsage(cmd)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (opts *standaloneVerifyOptions) run(args []string, stdout io.Writer) error {
|
||||
@@ -83,11 +89,11 @@ func (opts *standaloneVerifyOptions) run(args []string, stdout io.Writer) error
|
||||
expectedFingerprint := args[2]
|
||||
signaturePath := args[3]
|
||||
|
||||
unverifiedManifest, err := ioutil.ReadFile(manifestPath)
|
||||
unverifiedManifest, err := os.ReadFile(manifestPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading manifest from %s: %v", manifestPath, err)
|
||||
}
|
||||
unverifiedSignature, err := ioutil.ReadFile(signaturePath)
|
||||
unverifiedSignature, err := os.ReadFile(signaturePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading signature from %s: %v", signaturePath, err)
|
||||
}
|
||||
@@ -115,15 +121,16 @@ func (opts *standaloneVerifyOptions) run(args []string, stdout io.Writer) error
|
||||
type untrustedSignatureDumpOptions struct {
|
||||
}
|
||||
|
||||
func untrustedSignatureDumpCmd() cli.Command {
|
||||
func untrustedSignatureDumpCmd() *cobra.Command {
|
||||
opts := untrustedSignatureDumpOptions{}
|
||||
return cli.Command{
|
||||
Name: "untrusted-signature-dump-without-verification",
|
||||
Usage: "Dump contents of a signature WITHOUT VERIFYING IT",
|
||||
ArgsUsage: "SIGNATURE",
|
||||
Hidden: true,
|
||||
Action: commandAction(opts.run),
|
||||
cmd := &cobra.Command{
|
||||
Use: "untrusted-signature-dump-without-verification SIGNATURE",
|
||||
Short: "Dump contents of a signature WITHOUT VERIFYING IT",
|
||||
RunE: commandAction(opts.run),
|
||||
Hidden: true,
|
||||
}
|
||||
adjustUsage(cmd)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (opts *untrustedSignatureDumpOptions) run(args []string, stdout io.Writer) error {
|
||||
@@ -132,7 +139,7 @@ func (opts *untrustedSignatureDumpOptions) run(args []string, stdout io.Writer)
|
||||
}
|
||||
untrustedSignaturePath := args[0]
|
||||
|
||||
untrustedSignature, err := ioutil.ReadFile(untrustedSignaturePath)
|
||||
untrustedSignature, err := os.ReadFile(untrustedSignaturePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading untrusted signature from %s: %v", untrustedSignaturePath, err)
|
||||
}
|
||||
|
||||
@@ -2,12 +2,11 @@ package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/signature"
|
||||
"github.com/containers/image/v5/signature"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -25,9 +24,8 @@ const (
|
||||
// Test that results of runSkopeo failed with nothing on stdout, and substring
|
||||
// within the error message.
|
||||
func assertTestFailed(t *testing.T, stdout string, err error, substring string) {
|
||||
assert.Error(t, err)
|
||||
assert.ErrorContains(t, err, substring)
|
||||
assert.Empty(t, stdout)
|
||||
assert.Contains(t, err.Error(), substring)
|
||||
}
|
||||
|
||||
func TestStandaloneSign(t *testing.T) {
|
||||
@@ -40,8 +38,7 @@ func TestStandaloneSign(t *testing.T) {
|
||||
|
||||
manifestPath := "fixtures/image.manifest.json"
|
||||
dockerReference := "testing/manifest"
|
||||
os.Setenv("GNUPGHOME", "fixtures")
|
||||
defer os.Unsetenv("GNUPGHOME")
|
||||
t.Setenv("GNUPGHOME", "fixtures")
|
||||
|
||||
// Invalid command-line arguments
|
||||
for _, args := range [][]string{
|
||||
@@ -58,8 +55,8 @@ func TestStandaloneSign(t *testing.T) {
|
||||
|
||||
// Error reading manifest
|
||||
out, err := runSkopeo("standalone-sign", "-o", "/dev/null",
|
||||
"/this/doesnt/exist", dockerReference, fixturesTestKeyFingerprint)
|
||||
assertTestFailed(t, out, err, "/this/doesnt/exist")
|
||||
"/this/does/not/exist", dockerReference, fixturesTestKeyFingerprint)
|
||||
assertTestFailed(t, out, err, "/this/does/not/exist")
|
||||
|
||||
// Invalid Docker reference
|
||||
out, err = runSkopeo("standalone-sign", "-o", "/dev/null",
|
||||
@@ -78,7 +75,7 @@ func TestStandaloneSign(t *testing.T) {
|
||||
assertTestFailed(t, out, err, "/dev/full")
|
||||
|
||||
// Success
|
||||
sigOutput, err := ioutil.TempFile("", "sig")
|
||||
sigOutput, err := os.CreateTemp("", "sig")
|
||||
require.NoError(t, err)
|
||||
defer os.Remove(sigOutput.Name())
|
||||
out, err = runSkopeo("standalone-sign", "-o", sigOutput.Name(),
|
||||
@@ -86,9 +83,9 @@ func TestStandaloneSign(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, out)
|
||||
|
||||
sig, err := ioutil.ReadFile(sigOutput.Name())
|
||||
sig, err := os.ReadFile(sigOutput.Name())
|
||||
require.NoError(t, err)
|
||||
manifest, err := ioutil.ReadFile(manifestPath)
|
||||
manifest, err := os.ReadFile(manifestPath)
|
||||
require.NoError(t, err)
|
||||
mech, err = signature.NewGPGSigningMechanism()
|
||||
require.NoError(t, err)
|
||||
@@ -103,8 +100,7 @@ func TestStandaloneVerify(t *testing.T) {
|
||||
manifestPath := "fixtures/image.manifest.json"
|
||||
signaturePath := "fixtures/image.signature"
|
||||
dockerReference := "testing/manifest"
|
||||
os.Setenv("GNUPGHOME", "fixtures")
|
||||
defer os.Unsetenv("GNUPGHOME")
|
||||
t.Setenv("GNUPGHOME", "fixtures")
|
||||
|
||||
// Invalid command-line arguments
|
||||
for _, args := range [][]string{
|
||||
@@ -117,14 +113,14 @@ func TestStandaloneVerify(t *testing.T) {
|
||||
}
|
||||
|
||||
// Error reading manifest
|
||||
out, err := runSkopeo("standalone-verify", "/this/doesnt/exist",
|
||||
out, err := runSkopeo("standalone-verify", "/this/does/not/exist",
|
||||
dockerReference, fixturesTestKeyFingerprint, signaturePath)
|
||||
assertTestFailed(t, out, err, "/this/doesnt/exist")
|
||||
assertTestFailed(t, out, err, "/this/does/not/exist")
|
||||
|
||||
// Error reading signature
|
||||
out, err = runSkopeo("standalone-verify", manifestPath,
|
||||
dockerReference, fixturesTestKeyFingerprint, "/this/doesnt/exist")
|
||||
assertTestFailed(t, out, err, "/this/doesnt/exist")
|
||||
dockerReference, fixturesTestKeyFingerprint, "/this/does/not/exist")
|
||||
assertTestFailed(t, out, err, "/this/does/not/exist")
|
||||
|
||||
// Error verifying signature
|
||||
out, err = runSkopeo("standalone-verify", manifestPath,
|
||||
@@ -151,8 +147,8 @@ func TestUntrustedSignatureDump(t *testing.T) {
|
||||
|
||||
// Error reading manifest
|
||||
out, err := runSkopeo("untrusted-signature-dump-without-verification",
|
||||
"/this/doesnt/exist")
|
||||
assertTestFailed(t, out, err, "/this/doesnt/exist")
|
||||
"/this/does/not/exist")
|
||||
assertTestFailed(t, out, err, "/this/does/not/exist")
|
||||
|
||||
// Error reading signature (input is not a signature)
|
||||
out, err = runSkopeo("untrusted-signature-dump-without-verification", "fixtures/image.manifest.json")
|
||||
|
||||
cmd/skopeo/sync.go (new file, 687 lines)
@@ -0,0 +1,687 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
commonFlag "github.com/containers/common/pkg/flag"
|
||||
"github.com/containers/common/pkg/retry"
|
||||
"github.com/containers/image/v5/copy"
|
||||
"github.com/containers/image/v5/directory"
|
||||
"github.com/containers/image/v5/docker"
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/pkg/cli"
|
||||
"github.com/containers/image/v5/transports"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// syncOptions contains information retrieved from the skopeo sync command line.
type syncOptions struct {
	global                   *globalOptions // Global (not command dependent) skopeo options
	deprecatedTLSVerify      *deprecatedTLSVerifyOption
	srcImage                 *imageOptions     // Source image options
	destImage                *imageDestOptions // Destination image options
	retryOpts                *retry.Options
	removeSignatures         bool                      // Do not copy signatures from the source image
	signByFingerprint        string                    // Sign the image using a GPG key with the specified fingerprint
	signBySigstorePrivateKey string                    // Sign the image using a sigstore private key
	signPassphraseFile       string                    // Path pointing to a passphrase file when signing
	format                   commonFlag.OptionalString // Force conversion of the image to a specified format
	source                   string                    // Source repository name
	destination              string                    // Destination registry name
	scoped                   bool                      // When true, namespace copied images at destination using the source repository name
	all                      bool                      // Copy all of the images if an image in the source is a list
	dryRun                   bool                      // Don't actually copy anything, just output what it would have done
	preserveDigests          bool                      // Preserve digests during sync
	keepGoing                bool                      // Whether or not to abort the sync if there are any errors during syncing the images
}

// repoDescriptor contains information of a single repository used as a sync source.
type repoDescriptor struct {
	DirBasePath string                 // base path when source is 'dir'
	ImageRefs   []types.ImageReference // List of tagged image found for the repository
	Context     *types.SystemContext   // SystemContext for the sync command
}

// tlsVerifyConfig is an implementation of the Unmarshaler interface, used to
// customize the unmarshaling behaviour of the tls-verify YAML key.
type tlsVerifyConfig struct {
	skip types.OptionalBool // skip TLS verification check (false by default)
}

// registrySyncConfig contains information about a single registry, read from
// the source YAML file
type registrySyncConfig struct {
	Images           map[string][]string     // Images map images name to slices with the images' references (tags, digests)
	ImagesByTagRegex map[string]string       `yaml:"images-by-tag-regex"` // Images map images name to regular expression with the images' tags
	Credentials      types.DockerAuthConfig  // Username and password used to authenticate with the registry
	TLSVerify        tlsVerifyConfig         `yaml:"tls-verify"` // TLS verification mode (enabled by default)
	CertDir          string                  `yaml:"cert-dir"`   // Path to the TLS certificates of the registry
}

// sourceConfig contains all registries information read from the source YAML file
type sourceConfig map[string]registrySyncConfig

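Since sourceConfig is just a map keyed by registry hostname, a sync YAML file is shaped like the document embedded in this sketch. Registry names, tags and paths are invented for illustration, credentials are omitted, and the struct here deliberately mirrors only the tagged fields above rather than reusing skopeo's own types.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// exampleRegistry mirrors the YAML-visible fields of registrySyncConfig.
type exampleRegistry struct {
	Images           map[string][]string `yaml:"images"`
	ImagesByTagRegex map[string]string   `yaml:"images-by-tag-regex"`
	TLSVerify        bool                `yaml:"tls-verify"`
	CertDir          string              `yaml:"cert-dir"`
}

func main() {
	doc := `
registry.example.com:
  images:
    busybox: []              # empty list: sync every tag in the repository
    redis:
      - "1.0"
      - "2.0"
  images-by-tag-regex:
    nginx: ^1\.13\.[12]-alpine
  tls-verify: true
  cert-dir: /etc/containers/certs.d/registry.example.com
`
	var cfg map[string]exampleRegistry
	if err := yaml.Unmarshal([]byte(doc), &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg["registry.example.com"])
}
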
func syncCmd(global *globalOptions) *cobra.Command {
|
||||
sharedFlags, sharedOpts := sharedImageFlags()
|
||||
deprecatedTLSVerifyFlags, deprecatedTLSVerifyOpt := deprecatedTLSVerifyFlags()
|
||||
srcFlags, srcOpts := dockerImageFlags(global, sharedOpts, deprecatedTLSVerifyOpt, "src-", "screds")
|
||||
destFlags, destOpts := dockerImageFlags(global, sharedOpts, deprecatedTLSVerifyOpt, "dest-", "dcreds")
|
||||
retryFlags, retryOpts := retryFlags()
|
||||
|
||||
opts := syncOptions{
|
||||
global: global,
|
||||
deprecatedTLSVerify: deprecatedTLSVerifyOpt,
|
||||
srcImage: srcOpts,
|
||||
destImage: &imageDestOptions{imageOptions: destOpts},
|
||||
retryOpts: retryOpts,
|
||||
}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "sync [command options] --src TRANSPORT --dest TRANSPORT SOURCE DESTINATION",
|
||||
Short: "Synchronize one or more images from one location to another",
|
||||
Long: `Copy all the images from a SOURCE to a DESTINATION.
|
||||
|
||||
Allowed SOURCE transports (specified with --src): docker, dir, yaml.
|
||||
Allowed DESTINATION transports (specified with --dest): docker, dir.
|
||||
|
||||
See skopeo-sync(1) for details.
|
||||
`,
|
||||
RunE: commandAction(opts.run),
|
||||
Example: `skopeo sync --src docker --dest dir --scoped registry.example.com/busybox /media/usb`,
|
||||
}
|
||||
adjustUsage(cmd)
|
||||
flags := cmd.Flags()
|
||||
flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE images")
|
||||
flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
|
||||
flags.StringVar(&opts.signBySigstorePrivateKey, "sign-by-sigstore-private-key", "", "Sign the image using a sigstore private key at `PATH`")
|
||||
flags.StringVar(&opts.signPassphraseFile, "sign-passphrase-file", "", "File that contains a passphrase for the --sign-by key")
|
||||
flags.VarP(commonFlag.NewOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use when syncing image(s) to a destination (default is manifest type of source, with fallbacks)`)
|
||||
flags.StringVarP(&opts.source, "src", "s", "", "SOURCE transport type")
|
||||
flags.StringVarP(&opts.destination, "dest", "d", "", "DESTINATION transport type")
|
||||
flags.BoolVar(&opts.scoped, "scoped", false, "Images at DESTINATION are prefixed using the full source image path as scope")
|
||||
flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
|
||||
flags.BoolVar(&opts.dryRun, "dry-run", false, "Run without actually copying data")
|
||||
flags.BoolVar(&opts.preserveDigests, "preserve-digests", false, "Preserve digests of images and lists")
|
||||
flags.BoolVarP(&opts.keepGoing, "keep-going", "", false, "Do not abort the sync if any image copy fails")
|
||||
flags.AddFlagSet(&sharedFlags)
|
||||
flags.AddFlagSet(&deprecatedTLSVerifyFlags)
|
||||
flags.AddFlagSet(&srcFlags)
|
||||
flags.AddFlagSet(&destFlags)
|
||||
flags.AddFlagSet(&retryFlags)
|
||||
return cmd
|
||||
}
|
||||
|
||||
// UnmarshalYAML is the implementation of the Unmarshaler interface method
// for the tlsVerifyConfig type.
// It unmarshals the 'tls-verify' YAML key so that, when the key is not
// specified, TLS verification is enforced.
|
||||
func (tls *tlsVerifyConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
var verify bool
|
||||
if err := unmarshal(&verify); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tls.skip = types.NewOptionalBool(!verify)
|
||||
return nil
|
||||
}
|
||||
|
||||
// newSourceConfig unmarshals the provided YAML file path to the sourceConfig type.
|
||||
// It returns a new unmarshaled sourceConfig object and any error encountered.
|
||||
func newSourceConfig(yamlFile string) (sourceConfig, error) {
|
||||
var cfg sourceConfig
|
||||
source, err := os.ReadFile(yamlFile)
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
err = yaml.Unmarshal(source, &cfg)
|
||||
if err != nil {
|
||||
return cfg, fmt.Errorf("Failed to unmarshal %q: %w", yamlFile, err)
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// parseRepositoryReference parses input into a reference.Named, and verifies that it names a repository, not an image.
|
||||
func parseRepositoryReference(input string) (reference.Named, error) {
|
||||
ref, err := reference.ParseNormalizedNamed(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !reference.IsNameOnly(ref) {
|
||||
return nil, errors.New("input names a reference, not a repository")
|
||||
}
|
||||
return ref, nil
|
||||
}
|
||||
|
||||
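The name-only check above is what separates a repository (accepted as a sync source) from a single image reference carrying a tag or digest (rejected). A small, hedged illustration of how reference.IsNameOnly draws that line; the inputs are made up.

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	for _, input := range []string{
		"quay.io/example/busybox",        // repository: accepted
		"quay.io/example/busybox:latest", // tagged image: rejected
	} {
		ref, err := reference.ParseNormalizedNamed(input)
		if err != nil {
			fmt.Println(input, "->", err)
			continue
		}
		if reference.IsNameOnly(ref) {
			fmt.Println(input, "-> repository", ref.Name())
		} else {
			fmt.Println(input, "-> names an image, not a repository")
		}
	}
}
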
// destinationReference creates an image reference using the provided transport.
|
||||
// It returns an image reference to be used as the destination of an image copy and
|
||||
// any error encountered.
|
||||
func destinationReference(destination string, transport string) (types.ImageReference, error) {
|
||||
var imageTransport types.ImageTransport
|
||||
|
||||
switch transport {
|
||||
case docker.Transport.Name():
|
||||
destination = fmt.Sprintf("//%s", destination)
|
||||
imageTransport = docker.Transport
|
||||
case directory.Transport.Name():
|
||||
_, err := os.Stat(destination)
|
||||
if err == nil {
|
||||
return nil, fmt.Errorf("Refusing to overwrite destination directory %q", destination)
|
||||
}
|
||||
if !os.IsNotExist(err) {
|
||||
return nil, fmt.Errorf("Destination directory could not be used: %w", err)
|
||||
}
|
||||
// the directory holding the image must be created here
|
||||
if err = os.MkdirAll(destination, 0755); err != nil {
|
||||
return nil, fmt.Errorf("Error creating directory for image %s: %w", destination, err)
|
||||
}
|
||||
imageTransport = directory.Transport
|
||||
default:
|
||||
return nil, fmt.Errorf("%q is not a valid destination transport", transport)
|
||||
}
|
||||
logrus.Debugf("Destination for transport %q: %s", transport, destination)
|
||||
|
||||
destRef, err := imageTransport.ParseReference(destination)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Cannot obtain a valid image reference for transport %q and reference %q: %w", imageTransport.Name(), destination, err)
|
||||
}
|
||||
|
||||
return destRef, nil
|
||||
}
|
||||
|
||||
// getImageTags lists all tags in a repository.
|
||||
// It returns a string slice of tags and any error encountered.
|
||||
func getImageTags(ctx context.Context, sysCtx *types.SystemContext, repoRef reference.Named) ([]string, error) {
|
||||
name := repoRef.Name()
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"image": name,
|
||||
}).Info("Getting tags")
|
||||
// Ugly: NewReference rejects IsNameOnly references, and GetRepositoryTags ignores the tag/digest.
|
||||
// So, we use TagNameOnly here only to shut up NewReference
|
||||
dockerRef, err := docker.NewReference(reference.TagNameOnly(repoRef))
|
||||
if err != nil {
|
||||
return nil, err // Should never happen for a reference with tag and no digest
|
||||
}
|
||||
tags, err := docker.GetRepositoryTags(ctx, sysCtx, dockerRef)
|
||||
if err != nil {
|
||||
var unauthorizedForCredentials docker.ErrUnauthorizedForCredentials
|
||||
if errors.As(err, &unauthorizedForCredentials) {
|
||||
// Some registries may decide to block the "list all tags" endpoint.
|
||||
// Gracefully allow the sync to continue in this case.
|
||||
logrus.Warnf("Registry disallows tag list retrieval: %s", err)
|
||||
tags = nil
|
||||
} else {
|
||||
return nil, fmt.Errorf("Error determining repository tags for image %s: %w", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
return tags, nil
|
||||
}
|
||||
|
||||
// imagesToCopyFromRepo builds a list of image references from the tags
|
||||
// found in a source repository.
|
||||
// It returns an image reference slice with as many elements as the tags found
|
||||
// and any error encountered.
|
||||
func imagesToCopyFromRepo(sys *types.SystemContext, repoRef reference.Named) ([]types.ImageReference, error) {
|
||||
tags, err := getImageTags(context.Background(), sys, repoRef)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var sourceReferences []types.ImageReference
|
||||
for _, tag := range tags {
|
||||
taggedRef, err := reference.WithTag(repoRef, tag)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error creating a reference for repository %s and tag %q: %w", repoRef.Name(), tag, err)
|
||||
}
|
||||
ref, err := docker.NewReference(taggedRef)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Cannot obtain a valid image reference for transport %q and reference %s: %w", docker.Transport.Name(), taggedRef.String(), err)
|
||||
}
|
||||
sourceReferences = append(sourceReferences, ref)
|
||||
}
|
||||
return sourceReferences, nil
|
||||
}
|
||||
|
||||
// imagesToCopyFromDir builds a list of image references from the images found
|
||||
// in the source directory.
|
||||
// It returns an image reference slice with as many elements as the images found
|
||||
// and any error encountered.
|
||||
func imagesToCopyFromDir(dirPath string) ([]types.ImageReference, error) {
|
||||
var sourceReferences []types.ImageReference
|
||||
err := filepath.WalkDir(dirPath, func(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !d.IsDir() && d.Name() == "manifest.json" {
|
||||
dirname := filepath.Dir(path)
|
||||
ref, err := directory.Transport.ParseReference(dirname)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Cannot obtain a valid image reference for transport %q and reference %q: %w", directory.Transport.Name(), dirname, err)
|
||||
}
|
||||
sourceReferences = append(sourceReferences, ref)
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return sourceReferences,
|
||||
fmt.Errorf("Error walking the path %q: %w", dirPath, err)
|
||||
}
|
||||
|
||||
return sourceReferences, nil
|
||||
}
|
||||
|
||||
// imagesToCopyFromRegistry builds a list of repository descriptors from the images
|
||||
// in a registry configuration.
|
||||
// It returns a repository descriptors slice with as many elements as the images
|
||||
// found and any error encountered. Each element of the slice is a list of
|
||||
// image references, to be used as sync source.
|
||||
func imagesToCopyFromRegistry(registryName string, cfg registrySyncConfig, sourceCtx types.SystemContext) ([]repoDescriptor, error) {
|
||||
serverCtx := &sourceCtx
|
||||
// override ctx with per-registryName options
|
||||
serverCtx.DockerCertPath = cfg.CertDir
|
||||
serverCtx.DockerDaemonCertPath = cfg.CertDir
|
||||
serverCtx.DockerDaemonInsecureSkipTLSVerify = (cfg.TLSVerify.skip == types.OptionalBoolTrue)
|
||||
serverCtx.DockerInsecureSkipTLSVerify = cfg.TLSVerify.skip
|
||||
if cfg.Credentials != (types.DockerAuthConfig{}) {
|
||||
serverCtx.DockerAuthConfig = &cfg.Credentials
|
||||
}
|
||||
var repoDescList []repoDescriptor
|
||||
for imageName, refs := range cfg.Images {
|
||||
repoLogger := logrus.WithFields(logrus.Fields{
|
||||
"repo": imageName,
|
||||
"registry": registryName,
|
||||
})
|
||||
repoRef, err := parseRepositoryReference(fmt.Sprintf("%s/%s", registryName, imageName))
|
||||
if err != nil {
|
||||
repoLogger.Error("Error parsing repository name, skipping")
|
||||
logrus.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
repoLogger.Info("Processing repo")
|
||||
|
||||
var sourceReferences []types.ImageReference
|
||||
if len(refs) != 0 {
|
||||
for _, ref := range refs {
|
||||
tagLogger := logrus.WithFields(logrus.Fields{"ref": ref})
|
||||
var named reference.Named
|
||||
// first try as digest
|
||||
if d, err := digest.Parse(ref); err == nil {
|
||||
named, err = reference.WithDigest(repoRef, d)
|
||||
if err != nil {
|
||||
tagLogger.Error("Error processing ref, skipping")
|
||||
logrus.Error(err)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
tagLogger.Debugf("Ref was not a digest, trying as a tag: %s", err)
|
||||
named, err = reference.WithTag(repoRef, ref)
|
||||
if err != nil {
|
||||
tagLogger.Error("Error parsing ref, skipping")
|
||||
logrus.Error(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
imageRef, err := docker.NewReference(named)
|
||||
if err != nil {
|
||||
tagLogger.Error("Error processing ref, skipping")
|
||||
logrus.Errorf("Error getting image reference: %s", err)
|
||||
continue
|
||||
}
|
||||
sourceReferences = append(sourceReferences, imageRef)
|
||||
}
|
||||
} else { // len(refs) == 0
|
||||
repoLogger.Info("Querying registry for image tags")
|
||||
sourceReferences, err = imagesToCopyFromRepo(serverCtx, repoRef)
|
||||
if err != nil {
|
||||
repoLogger.Error("Error processing repo, skipping")
|
||||
logrus.Error(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if len(sourceReferences) == 0 {
|
||||
repoLogger.Warnf("No refs to sync found")
|
||||
continue
|
||||
}
|
||||
repoDescList = append(repoDescList, repoDescriptor{
|
||||
ImageRefs: sourceReferences,
|
||||
Context: serverCtx})
|
||||
}
|
||||
|
||||
for imageName, tagRegex := range cfg.ImagesByTagRegex {
|
||||
repoLogger := logrus.WithFields(logrus.Fields{
|
||||
"repo": imageName,
|
||||
"registry": registryName,
|
||||
})
|
||||
repoRef, err := parseRepositoryReference(fmt.Sprintf("%s/%s", registryName, imageName))
|
||||
if err != nil {
|
||||
repoLogger.Error("Error parsing repository name, skipping")
|
||||
logrus.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
repoLogger.Info("Processing repo")
|
||||
|
||||
var sourceReferences []types.ImageReference
|
||||
|
||||
tagReg, err := regexp.Compile(tagRegex)
|
||||
if err != nil {
|
||||
repoLogger.WithFields(logrus.Fields{
|
||||
"regex": tagRegex,
|
||||
}).Error("Error parsing regex, skipping")
|
||||
logrus.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
repoLogger.Info("Querying registry for image tags")
|
||||
allSourceReferences, err := imagesToCopyFromRepo(serverCtx, repoRef)
|
||||
if err != nil {
|
||||
repoLogger.Error("Error processing repo, skipping")
|
||||
logrus.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
repoLogger.Infof("Start filtering using the regular expression: %v", tagRegex)
|
||||
for _, sReference := range allSourceReferences {
|
||||
tagged, isTagged := sReference.DockerReference().(reference.Tagged)
|
||||
if !isTagged {
|
||||
repoLogger.Errorf("Internal error, reference %s does not have a tag, skipping", sReference.DockerReference())
|
||||
continue
|
||||
}
|
||||
if tagReg.MatchString(tagged.Tag()) {
|
||||
sourceReferences = append(sourceReferences, sReference)
|
||||
}
|
||||
}
|
||||
|
||||
if len(sourceReferences) == 0 {
|
||||
repoLogger.Warnf("No refs to sync found")
|
||||
continue
|
||||
}
|
||||
repoDescList = append(repoDescList, repoDescriptor{
|
||||
ImageRefs: sourceReferences,
|
||||
Context: serverCtx})
|
||||
}
|
||||
|
||||
return repoDescList, nil
|
||||
}
|
||||
|
||||
// imagesToCopy retrieves all the images to copy from a specified sync source
|
||||
// and transport.
|
||||
// It returns a slice of repository descriptors, where each descriptor is a
|
||||
// list of tagged image references to be used as sync source, and any error
|
||||
// encountered.
|
||||
func imagesToCopy(source string, transport string, sourceCtx *types.SystemContext) ([]repoDescriptor, error) {
|
||||
var descriptors []repoDescriptor
|
||||
|
||||
switch transport {
|
||||
case docker.Transport.Name():
|
||||
desc := repoDescriptor{
|
||||
Context: sourceCtx,
|
||||
}
|
||||
named, err := reference.ParseNormalizedNamed(source) // May be a repository or an image.
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Cannot obtain a valid image reference for transport %q and reference %q: %w", docker.Transport.Name(), source, err)
|
||||
}
|
||||
imageTagged := !reference.IsNameOnly(named)
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"imagename": source,
|
||||
"tagged": imageTagged,
|
||||
}).Info("Tag presence check")
|
||||
if imageTagged {
|
||||
srcRef, err := docker.NewReference(named)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Cannot obtain a valid image reference for transport %q and reference %q: %w", docker.Transport.Name(), named.String(), err)
|
||||
}
|
||||
desc.ImageRefs = []types.ImageReference{srcRef}
|
||||
} else {
|
||||
desc.ImageRefs, err = imagesToCopyFromRepo(sourceCtx, named)
|
||||
if err != nil {
|
||||
return descriptors, err
|
||||
}
|
||||
if len(desc.ImageRefs) == 0 {
|
||||
return descriptors, fmt.Errorf("No images to sync found in %q", source)
|
||||
}
|
||||
}
|
||||
descriptors = append(descriptors, desc)
|
||||
|
||||
case directory.Transport.Name():
|
||||
desc := repoDescriptor{
|
||||
Context: sourceCtx,
|
||||
}
|
||||
|
||||
if _, err := os.Stat(source); err != nil {
|
||||
return descriptors, fmt.Errorf("Invalid source directory specified: %w", err)
|
||||
}
|
||||
desc.DirBasePath = source
|
||||
var err error
|
||||
desc.ImageRefs, err = imagesToCopyFromDir(source)
|
||||
if err != nil {
|
||||
return descriptors, err
|
||||
}
|
||||
if len(desc.ImageRefs) == 0 {
|
||||
return descriptors, fmt.Errorf("No images to sync found in %q", source)
|
||||
}
|
||||
descriptors = append(descriptors, desc)
|
||||
|
||||
case "yaml":
|
||||
cfg, err := newSourceConfig(source)
|
||||
if err != nil {
|
||||
return descriptors, err
|
||||
}
|
||||
for registryName, registryConfig := range cfg {
|
||||
if len(registryConfig.Images) == 0 && len(registryConfig.ImagesByTagRegex) == 0 {
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"registry": registryName,
|
||||
}).Warn("No images specified for registry")
|
||||
continue
|
||||
}
|
||||
|
||||
descs, err := imagesToCopyFromRegistry(registryName, registryConfig, *sourceCtx)
|
||||
if err != nil {
|
||||
return descriptors, fmt.Errorf("Failed to retrieve list of images from registry %q: %w", registryName, err)
|
||||
}
|
||||
descriptors = append(descriptors, descs...)
|
||||
}
|
||||
}
|
||||
|
||||
return descriptors, nil
|
||||
}
|
||||
|
||||
func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
|
||||
if len(args) != 2 {
|
||||
return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
|
||||
}
|
||||
opts.deprecatedTLSVerify.warnIfUsed([]string{"--src-tls-verify", "--dest-tls-verify"})
|
||||
|
||||
policyContext, err := opts.global.getPolicyContext()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error loading trust policy: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := policyContext.Destroy(); err != nil {
|
||||
retErr = noteCloseFailure(retErr, "tearing down policy context", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// validate source and destination options
|
||||
contains := func(val string, list []string) (_ bool) {
|
||||
for _, l := range list {
|
||||
if l == val {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if len(opts.source) == 0 {
|
||||
return errors.New("A source transport must be specified")
|
||||
}
|
||||
if !contains(opts.source, []string{docker.Transport.Name(), directory.Transport.Name(), "yaml"}) {
|
||||
return fmt.Errorf("%q is not a valid source transport", opts.source)
|
||||
}
|
||||
|
||||
if len(opts.destination) == 0 {
|
||||
return errors.New("A destination transport must be specified")
|
||||
}
|
||||
if !contains(opts.destination, []string{docker.Transport.Name(), directory.Transport.Name()}) {
|
||||
return fmt.Errorf("%q is not a valid destination transport", opts.destination)
|
||||
}
|
||||
|
||||
if opts.source == opts.destination && opts.source == directory.Transport.Name() {
|
||||
return errors.New("sync from 'dir' to 'dir' not implemented, consider using rsync instead")
|
||||
}
|
||||
|
||||
imageListSelection := copy.CopySystemImage
|
||||
if opts.all {
|
||||
imageListSelection = copy.CopyAllImages
|
||||
}
|
||||
|
||||
sourceCtx, err := opts.srcImage.newSystemContext()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var manifestType string
|
||||
if opts.format.Present() {
|
||||
manifestType, err = parseManifestFormat(opts.format.Value())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
ctx, cancel := opts.global.commandTimeoutContext()
|
||||
defer cancel()
|
||||
|
||||
sourceArg := args[0]
|
||||
var srcRepoList []repoDescriptor
|
||||
if err = retry.IfNecessary(ctx, func() error {
|
||||
srcRepoList, err = imagesToCopy(sourceArg, opts.source, sourceCtx)
|
||||
return err
|
||||
}, opts.retryOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
destination := args[1]
|
||||
destinationCtx, err := opts.destImage.newSystemContext()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// c/image/copy.Image does allow creating both simple signing and sigstore signatures simultaneously,
|
||||
// with independent passphrases, but that would make the CLI probably too confusing.
|
||||
// For now, use the passphrase with either, but only one of them.
|
||||
if opts.signPassphraseFile != "" && opts.signByFingerprint != "" && opts.signBySigstorePrivateKey != "" {
|
||||
return fmt.Errorf("Only one of --sign-by and sign-by-sigstore-private-key can be used with sign-passphrase-file")
|
||||
}
|
||||
var passphrase string
|
||||
if opts.signPassphraseFile != "" {
|
||||
p, err := cli.ReadPassphraseFile(opts.signPassphraseFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
passphrase = p
|
||||
} else if opts.signBySigstorePrivateKey != "" {
|
||||
p, err := promptForPassphrase(opts.signBySigstorePrivateKey, os.Stdin, os.Stdout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
passphrase = p
|
||||
}
|
||||
options := copy.Options{
|
||||
RemoveSignatures: opts.removeSignatures,
|
||||
SignBy: opts.signByFingerprint,
|
||||
SignPassphrase: passphrase,
|
||||
SignBySigstorePrivateKeyFile: opts.signBySigstorePrivateKey,
|
||||
SignSigstorePrivateKeyPassphrase: []byte(passphrase),
|
||||
ReportWriter: os.Stdout,
|
||||
DestinationCtx: destinationCtx,
|
||||
ImageListSelection: imageListSelection,
|
||||
PreserveDigests: opts.preserveDigests,
|
||||
OptimizeDestinationImageAlreadyExists: true,
|
||||
ForceManifestMIMEType: manifestType,
|
||||
}
|
||||
errorsPresent := false
|
||||
imagesNumber := 0
|
||||
if opts.dryRun {
|
||||
logrus.Warn("Running in dry-run mode")
|
||||
}
|
||||
|
||||
for _, srcRepo := range srcRepoList {
|
||||
options.SourceCtx = srcRepo.Context
|
||||
for counter, ref := range srcRepo.ImageRefs {
|
||||
var destSuffix string
|
||||
switch ref.Transport() {
|
||||
case docker.Transport:
|
||||
// docker -> dir or docker -> docker
|
||||
destSuffix = ref.DockerReference().String()
|
||||
case directory.Transport:
|
||||
// dir -> docker (we don't allow `dir` -> `dir` sync operations)
|
||||
destSuffix = strings.TrimPrefix(ref.StringWithinTransport(), srcRepo.DirBasePath)
|
||||
if destSuffix == "" {
|
||||
// if source is a full path to an image, have destPath scoped to repo:tag
|
||||
destSuffix = path.Base(srcRepo.DirBasePath)
|
||||
}
|
||||
}
|
||||
|
||||
if !opts.scoped {
|
||||
destSuffix = path.Base(destSuffix)
|
||||
}
|
||||
|
||||
destRef, err := destinationReference(path.Join(destination, destSuffix), opts.destination)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fromToFields := logrus.Fields{
|
||||
"from": transports.ImageName(ref),
|
||||
"to": transports.ImageName(destRef),
|
||||
}
|
||||
if opts.dryRun {
|
||||
logrus.WithFields(fromToFields).Infof("Would have copied image ref %d/%d", counter+1, len(srcRepo.ImageRefs))
|
||||
} else {
|
||||
logrus.WithFields(fromToFields).Infof("Copying image ref %d/%d", counter+1, len(srcRepo.ImageRefs))
|
||||
if err = retry.IfNecessary(ctx, func() error {
|
||||
_, err = copy.Image(ctx, policyContext, destRef, ref, &options)
|
||||
return err
|
||||
}, opts.retryOpts); err != nil {
|
||||
if !opts.keepGoing {
|
||||
return fmt.Errorf("Error copying ref %q: %w", transports.ImageName(ref), err)
|
||||
}
|
||||
// log the error, keep a note that there was a failure and move on to the next
|
||||
// image ref
|
||||
errorsPresent = true
|
||||
logrus.WithError(err).Errorf("Error copying ref %q", transports.ImageName(ref))
|
||||
continue
|
||||
}
|
||||
}
|
||||
imagesNumber++
|
||||
}
|
||||
}
|
||||
|
||||
if opts.dryRun {
|
||||
logrus.Infof("Would have synced %d images from %d sources", imagesNumber, len(srcRepoList))
|
||||
} else {
|
||||
logrus.Infof("Synced %d images from %d sources", imagesNumber, len(srcRepoList))
|
||||
}
|
||||
if !errorsPresent {
|
||||
return nil
|
||||
}
|
||||
return errors.New("Sync failed due to previous reported error(s) for one or more images")
|
||||
}
|
||||
@@ -1,11 +1,8 @@
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
package main
|
||||
|
||||
func maybeReexec() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func reexecIfNecessaryForImages(inputImageNames ...string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/containers/buildah/pkg/unshare"
|
||||
"github.com/containers/image/storage"
|
||||
"github.com/containers/image/transports/alltransports"
|
||||
"github.com/pkg/errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/containers/image/v5/transports/alltransports"
|
||||
"github.com/containers/storage/pkg/unshare"
|
||||
"github.com/syndtr/gocapability/capability"
|
||||
)
|
||||
|
||||
@@ -23,7 +23,7 @@ func maybeReexec() error {
|
||||
// if we already have the capabilities we need.
|
||||
capabilities, err := capability.NewPid(0)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading the current capabilities sets")
|
||||
return fmt.Errorf("error reading the current capabilities sets: %w", err)
|
||||
}
|
||||
for _, cap := range neededCapabilities {
|
||||
if !capabilities.Get(capability.EFFECTIVE, cap) {
|
||||
@@ -36,10 +36,12 @@ func maybeReexec() error {
|
||||
}
|
||||
|
||||
func reexecIfNecessaryForImages(imageNames ...string) error {
|
||||
// Check if container-storage are used before doing unshare
|
||||
// Check if container-storage is used before doing unshare
|
||||
for _, imageName := range imageNames {
|
||||
transport := alltransports.TransportFromImageName(imageName)
|
||||
if transport != nil && transport.Name() == storage.Transport.Name() {
|
||||
// Hard-code the storage name to avoid a reference on c/image/storage.
|
||||
// See https://github.com/containers/skopeo/issues/771#issuecomment-563125006.
|
||||
if transport != nil && transport.Name() == "containers-storage" {
|
||||
return maybeReexec()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,12 +3,22 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/transports/alltransports"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/urfave/cli"
|
||||
commonFlag "github.com/containers/common/pkg/flag"
|
||||
"github.com/containers/common/pkg/retry"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/pkg/compression"
|
||||
"github.com/containers/image/v5/transports/alltransports"
|
||||
"github.com/containers/image/v5/types"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"golang.org/x/term"
|
||||
)
|
||||
|
||||
// errorShouldDisplayUsage is a subtype of error used by command handlers to indicate that cli.ShowSubcommandHelp should be called.
|
||||
@@ -16,170 +26,247 @@ type errorShouldDisplayUsage struct {
|
||||
error
|
||||
}
|
||||
|
||||
// commandAction intermediates between the cli.ActionFunc interface and the real handler,
|
||||
// primarily to ensure that cli.Context is not available to the handler, which in turn
|
||||
// makes sure that the cli.String() etc. flag access functions are not used,
|
||||
// and everything is done using the *Options structures and the Destination: members of cli.Flag.
|
||||
// handler may return errorShouldDisplayUsage to cause cli.ShowSubcommandHelp to be called.
|
||||
func commandAction(handler func(args []string, stdout io.Writer) error) cli.ActionFunc {
|
||||
return func(c *cli.Context) error {
|
||||
err := handler(([]string)(c.Args()), c.App.Writer)
|
||||
if _, ok := err.(errorShouldDisplayUsage); ok {
|
||||
cli.ShowSubcommandHelp(c)
|
||||
// noteCloseFailure returns (possibly-nil) err modified to account for (non-nil) closeErr.
|
||||
// The error for closeErr is annotated with description (which is not a format string)
|
||||
// Typical usage:
|
||||
//
|
||||
// defer func() {
|
||||
// if err := something.Close(); err != nil {
|
||||
// returnedErr = noteCloseFailure(returnedErr, "closing something", err)
|
||||
// }
|
||||
// }
|
||||
func noteCloseFailure(err error, description string, closeErr error) error {
|
||||
// We don’t accept a Closer() and close it ourselves because signature.PolicyContext has .Destroy(), not .Close().
|
||||
// This also makes it harder for a caller to do
|
||||
// defer noteCloseFailure(returnedErr, …)
|
||||
// which doesn’t use the right value of returnedErr, and doesn’t update it.
|
||||
if err == nil {
|
||||
return fmt.Errorf("%s: %w", description, closeErr)
|
||||
}
|
||||
// In this case we prioritize the primary error for use with %w; closeErr is usually less relevant, or might be a consequence of the primary error.
|
||||
return fmt.Errorf("%w (%s: %v)", err, description, closeErr)
|
||||
}
|
||||
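// Illustrative sketch (not part of this diff): how a caller might pair a
// deferred Close with noteCloseFailure so close errors are not dropped.
// The file name and function are hypothetical; only os and noteCloseFailure
// come from the surrounding code.
func writeReportSketch() (retErr error) {
	f, err := os.Create("/tmp/report.txt")
	if err != nil {
		return err
	}
	defer func() {
		if err := f.Close(); err != nil {
			retErr = noteCloseFailure(retErr, "closing report file", err)
		}
	}()
	_, err = f.WriteString("done\n")
	return err
}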
|
||||
// commandAction intermediates between the RunE interface and the real handler,
|
||||
// primarily to ensure that cobra.Command is not available to the handler, which in turn
|
||||
// makes sure that the cmd.Flags() etc. flag access functions are not used,
|
||||
// and everything is done using the *Options structures and the *Var() methods of cmd.Flag().
|
||||
// handler may return errorShouldDisplayUsage to cause c.Help to be called.
|
||||
func commandAction(handler func(args []string, stdout io.Writer) error) func(cmd *cobra.Command, args []string) error {
|
||||
return func(c *cobra.Command, args []string) error {
|
||||
err := handler(args, c.OutOrStdout())
|
||||
var shouldDisplayUsage errorShouldDisplayUsage
|
||||
if errors.As(err, &shouldDisplayUsage) {
|
||||
return c.Help()
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
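// Illustrative sketch (not part of this diff): wiring a handler through
// commandAction so the handler only sees parsed arguments and an io.Writer,
// never the cobra.Command itself. Everything except commandAction and
// errorShouldDisplayUsage is hypothetical.
func exampleCommandSketch() *cobra.Command {
	opts := struct{ verbose bool }{}
	cmd := &cobra.Command{
		Use: "example IMAGE",
		RunE: commandAction(func(args []string, stdout io.Writer) error {
			if len(args) != 1 {
				return errorShouldDisplayUsage{errors.New("exactly one IMAGE argument is required")}
			}
			fmt.Fprintf(stdout, "image: %s (verbose=%v)\n", args[0], opts.verbose)
			return nil
		}),
	}
	cmd.Flags().BoolVar(&opts.verbose, "verbose", false, "verbose output")
	return cmd
}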
|
||||
// deprecatedTLSVerifyOption represents a deprecated --tls-verify option,
|
||||
// which was accepted for all subcommands, for a time.
|
||||
// Every user should call deprecatedTLSVerifyOption.warnIfUsed() as part of handling the CLI,
|
||||
// whether or not the value actually ends up being used.
|
||||
// DO NOT ADD ANY NEW USES OF THIS; just call dockerImageFlags with an appropriate, possibly empty, flagPrefix.
|
||||
type deprecatedTLSVerifyOption struct {
|
||||
tlsVerify commonFlag.OptionalBool // FIXME FIXME: Warn if this is used, or even if it is ignored.
|
||||
}
|
||||
|
||||
// warnIfUsed warns if tlsVerify was set by the user, and suggests alternatives (which should
|
||||
// start with "--").
|
||||
// Every user should call this as part of handling the CLI, whether or not the value actually
|
||||
// ends up being used.
|
||||
func (opts *deprecatedTLSVerifyOption) warnIfUsed(alternatives []string) {
|
||||
if opts.tlsVerify.Present() {
|
||||
logrus.Warnf("'--tls-verify' is deprecated, instead use: %s", strings.Join(alternatives, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
// deprecatedTLSVerifyFlags prepares the CLI flag writing into deprecatedTLSVerifyOption, and the managed deprecatedTLSVerifyOption structure.
|
||||
// DO NOT ADD ANY NEW USES OF THIS; just call dockerImageFlags with an appropriate, possibly empty, flagPrefix.
|
||||
func deprecatedTLSVerifyFlags() (pflag.FlagSet, *deprecatedTLSVerifyOption) {
|
||||
opts := deprecatedTLSVerifyOption{}
|
||||
fs := pflag.FlagSet{}
|
||||
flag := commonFlag.OptionalBoolFlag(&fs, &opts.tlsVerify, "tls-verify", "require HTTPS and verify certificates when accessing the container registry")
|
||||
flag.Hidden = true
|
||||
return fs, &opts
|
||||
}
|
||||
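// Illustrative sketch (not part of this diff): how a subcommand could keep
// accepting the deprecated bare --tls-verify flag while pointing users at the
// prefixed replacements. The wrapper function is hypothetical.
func attachDeprecatedTLSVerifySketch(cmd *cobra.Command) *deprecatedTLSVerifyOption {
	deprecatedFlags, deprecatedOpt := deprecatedTLSVerifyFlags()
	cmd.Flags().AddFlagSet(&deprecatedFlags)
	// After flag parsing, the handler would call:
	//   deprecatedOpt.warnIfUsed([]string{"--src-tls-verify", "--dest-tls-verify"})
	return deprecatedOpt
}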
|
||||
// sharedImageOptions collects CLI flags which are image-related, but do not change across images.
|
||||
// This really should be a part of globalOptions, but that would break existing users of (skopeo copy --authfile=).
|
||||
type sharedImageOptions struct {
|
||||
authFilePath string // Path to a */containers/auth.json
|
||||
}
|
||||
|
||||
// imageFlags prepares a collection of CLI flags writing into sharedImageOptions, and the managed sharedImageOptions structure.
|
||||
func sharedImageFlags() ([]cli.Flag, *sharedImageOptions) {
|
||||
// sharedImageFlags prepares a collection of CLI flags writing into sharedImageOptions, and the managed sharedImageOptions structure.
|
||||
func sharedImageFlags() (pflag.FlagSet, *sharedImageOptions) {
|
||||
opts := sharedImageOptions{}
|
||||
return []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "authfile",
|
||||
Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
|
||||
Destination: &opts.authFilePath,
|
||||
},
|
||||
}, &opts
|
||||
fs := pflag.FlagSet{}
|
||||
fs.StringVar(&opts.authFilePath, "authfile", os.Getenv("REGISTRY_AUTH_FILE"), "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json")
|
||||
return fs, &opts
|
||||
}
|
||||
|
||||
// dockerImageOptions collects CLI flags specific to the "docker" transport, which are
|
||||
// the same across subcommands, but may be different for each image
|
||||
// (e.g. may differ between the source and destination of a copy)
|
||||
type dockerImageOptions struct {
|
||||
global *globalOptions // May be shared across several imageOptions instances.
|
||||
shared *sharedImageOptions // May be shared across several imageOptions instances.
|
||||
deprecatedTLSVerify *deprecatedTLSVerifyOption // May be shared across several imageOptions instances, or nil.
|
||||
authFilePath commonFlag.OptionalString // Path to a */containers/auth.json (prefixed version to override shared image option).
|
||||
credsOption commonFlag.OptionalString // username[:password] for accessing a registry
|
||||
userName commonFlag.OptionalString // username for accessing a registry
|
||||
password commonFlag.OptionalString // password for accessing a registry
|
||||
registryToken commonFlag.OptionalString // token to be used directly as a Bearer token when accessing the registry
|
||||
dockerCertPath string // A directory using Docker-like *.{crt,cert,key} files for connecting to a registry or a daemon
|
||||
tlsVerify commonFlag.OptionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
|
||||
noCreds bool // Access the registry anonymously
|
||||
}
|
||||
|
||||
// imageOptions collects CLI flags which are the same across subcommands, but may be different for each image
|
||||
// (e.g. may differ between the source and destination of a copy)
|
||||
type imageOptions struct {
|
||||
global *globalOptions // May be shared across several imageOptions instances.
|
||||
shared *sharedImageOptions // May be shared across several imageOptions instances.
|
||||
credsOption optionalString // username[:password] for accessing a registry
|
||||
dockerCertPath string // A directory using Docker-like *.{crt,cert,key} files for connecting to a registry or a daemon
|
||||
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
|
||||
sharedBlobDir string // A directory to use for OCI blobs, shared across repositories
|
||||
dockerDaemonHost string // docker-daemon: host to connect to
|
||||
noCreds bool // Access the registry anonymously
|
||||
dockerImageOptions
|
||||
sharedBlobDir string // A directory to use for OCI blobs, shared across repositories
|
||||
dockerDaemonHost string // docker-daemon: host to connect to
|
||||
}
|
||||
|
||||
// dockerImageFlags prepares a collection of docker-transport specific CLI flags
|
||||
// writing into imageOptions, and the managed imageOptions structure.
|
||||
func dockerImageFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLSVerify *deprecatedTLSVerifyOption, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageOptions) {
|
||||
flags := imageOptions{
|
||||
dockerImageOptions: dockerImageOptions{
|
||||
global: global,
|
||||
shared: shared,
|
||||
deprecatedTLSVerify: deprecatedTLSVerify,
|
||||
},
|
||||
}
|
||||
|
||||
fs := pflag.FlagSet{}
|
||||
if flagPrefix != "" {
|
||||
// the non-prefixed flag is handled by a shared flag.
|
||||
fs.Var(commonFlag.NewOptionalStringValue(&flags.authFilePath), flagPrefix+"authfile", "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json")
|
||||
}
|
||||
fs.Var(commonFlag.NewOptionalStringValue(&flags.credsOption), flagPrefix+"creds", "Use `USERNAME[:PASSWORD]` for accessing the registry")
|
||||
fs.Var(commonFlag.NewOptionalStringValue(&flags.userName), flagPrefix+"username", "Username for accessing the registry")
|
||||
fs.Var(commonFlag.NewOptionalStringValue(&flags.password), flagPrefix+"password", "Password for accessing the registry")
|
||||
if credsOptionAlias != "" {
|
||||
// This is horribly ugly, but we need to support the old option forms of (skopeo copy) for compatibility.
|
||||
// Don't add any more cases like this.
|
||||
f := fs.VarPF(commonFlag.NewOptionalStringValue(&flags.credsOption), credsOptionAlias, "", "Use `USERNAME[:PASSWORD]` for accessing the registry")
|
||||
f.Hidden = true
|
||||
}
|
||||
fs.Var(commonFlag.NewOptionalStringValue(&flags.registryToken), flagPrefix+"registry-token", "Provide a Bearer token for accessing the registry")
|
||||
fs.StringVar(&flags.dockerCertPath, flagPrefix+"cert-dir", "", "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry or daemon")
|
||||
commonFlag.OptionalBoolFlag(&fs, &flags.tlsVerify, flagPrefix+"tls-verify", "require HTTPS and verify certificates when talking to the container registry or daemon")
|
||||
fs.BoolVar(&flags.noCreds, flagPrefix+"no-creds", false, "Access the registry anonymously")
|
||||
return fs, &flags
|
||||
}
|
||||
|
||||
// imageFlags prepares a collection of CLI flags writing into imageOptions, and the managed imageOptions structure.
|
||||
func imageFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) ([]cli.Flag, *imageOptions) {
|
||||
opts := imageOptions{
|
||||
global: global,
|
||||
shared: shared,
|
||||
}
|
||||
func imageFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLSVerify *deprecatedTLSVerifyOption, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageOptions) {
|
||||
dockerFlags, opts := dockerImageFlags(global, shared, deprecatedTLSVerify, flagPrefix, credsOptionAlias)
|
||||
|
||||
// This is horribly ugly, but we need to support the old option forms of (skopeo copy) for compatibility.
|
||||
// Don't add any more cases like this.
|
||||
credsOptionExtra := ""
|
||||
if credsOptionAlias != "" {
|
||||
credsOptionExtra += "," + credsOptionAlias
|
||||
}
|
||||
fs := pflag.FlagSet{}
|
||||
fs.StringVar(&opts.sharedBlobDir, flagPrefix+"shared-blob-dir", "", "`DIRECTORY` to use to share blobs across OCI repositories")
|
||||
fs.StringVar(&opts.dockerDaemonHost, flagPrefix+"daemon-host", "", "use docker daemon host at `HOST` (docker-daemon: only)")
|
||||
fs.AddFlagSet(&dockerFlags)
|
||||
return fs, opts
|
||||
}
|
||||
|
||||
return []cli.Flag{
|
||||
cli.GenericFlag{
|
||||
Name: flagPrefix + "creds" + credsOptionExtra,
|
||||
Usage: "Use `USERNAME[:PASSWORD]` for accessing the registry",
|
||||
Value: newOptionalStringValue(&opts.credsOption),
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: flagPrefix + "cert-dir",
|
||||
Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry or daemon",
|
||||
Destination: &opts.dockerCertPath,
|
||||
},
|
||||
cli.GenericFlag{
|
||||
Name: flagPrefix + "tls-verify",
|
||||
Usage: "require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)",
|
||||
Value: newOptionalBoolValue(&opts.tlsVerify),
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: flagPrefix + "shared-blob-dir",
|
||||
Usage: "`DIRECTORY` to use to share blobs across OCI repositories",
|
||||
Destination: &opts.sharedBlobDir,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: flagPrefix + "daemon-host",
|
||||
Usage: "use docker daemon host at `HOST` (docker-daemon: only)",
|
||||
Destination: &opts.dockerDaemonHost,
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: flagPrefix + "no-creds",
|
||||
Usage: "Access the registry anonymously",
|
||||
Destination: &opts.noCreds,
|
||||
},
|
||||
}, &opts
|
||||
func retryFlags() (pflag.FlagSet, *retry.Options) {
|
||||
opts := retry.Options{}
|
||||
fs := pflag.FlagSet{}
|
||||
fs.IntVar(&opts.MaxRetry, "retry-times", 0, "the number of times to possibly retry")
|
||||
return fs, &opts
|
||||
}
|
||||
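// Illustrative sketch (not part of this diff): composing the shared, per-image
// and retry flag sets defined above onto one subcommand. The command name and
// wrapper function are hypothetical; nil disables the deprecated --tls-verify
// handling and "" uses unprefixed flag names.
func exampleFlagCompositionSketch(global *globalOptions) *cobra.Command {
	sharedFlags, sharedOpts := sharedImageFlags()
	imgFS, imgOpts := imageFlags(global, sharedOpts, nil, "", "")
	retryFS, retryOpts := retryFlags()
	cmd := &cobra.Command{Use: "example"}
	cmd.Flags().AddFlagSet(&sharedFlags)
	cmd.Flags().AddFlagSet(&imgFS)
	cmd.Flags().AddFlagSet(&retryFS)
	_, _ = imgOpts, retryOpts // a real handler would read these after parsing
	return cmd
}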
|
||||
// newSystemContext returns a *types.SystemContext corresponding to opts.
|
||||
// It is guaranteed to return a fresh instance, so it is safe to make additional updates to it.
|
||||
func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
|
||||
ctx := &types.SystemContext{
|
||||
RegistriesDirPath: opts.global.registriesDirPath,
|
||||
ArchitectureChoice: opts.global.overrideArch,
|
||||
OSChoice: opts.global.overrideOS,
|
||||
DockerCertPath: opts.dockerCertPath,
|
||||
OCISharedBlobDirPath: opts.sharedBlobDir,
|
||||
AuthFilePath: opts.shared.authFilePath,
|
||||
DockerDaemonHost: opts.dockerDaemonHost,
|
||||
DockerDaemonCertPath: opts.dockerCertPath,
|
||||
SystemRegistriesConfPath: opts.global.registriesConfPath,
|
||||
// *types.SystemContext instance from globalOptions
|
||||
// imageOptions option overrides the instance if both are present.
|
||||
ctx := opts.global.newSystemContext()
|
||||
ctx.DockerCertPath = opts.dockerCertPath
|
||||
ctx.OCISharedBlobDirPath = opts.sharedBlobDir
|
||||
ctx.AuthFilePath = opts.shared.authFilePath
|
||||
ctx.DockerDaemonHost = opts.dockerDaemonHost
|
||||
ctx.DockerDaemonCertPath = opts.dockerCertPath
|
||||
if opts.dockerImageOptions.authFilePath.Present() {
|
||||
ctx.AuthFilePath = opts.dockerImageOptions.authFilePath.Value()
|
||||
}
|
||||
if opts.tlsVerify.present {
|
||||
ctx.DockerDaemonInsecureSkipTLSVerify = !opts.tlsVerify.value
|
||||
if opts.deprecatedTLSVerify != nil && opts.deprecatedTLSVerify.tlsVerify.Present() {
|
||||
// If both this deprecated option and a non-deprecated option are present, we use the latter value.
|
||||
ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.deprecatedTLSVerify.tlsVerify.Value())
|
||||
}
|
||||
// DEPRECATED: We support this for backward compatibility, but override it if a per-image flag is provided.
|
||||
if opts.global.tlsVerify.present {
|
||||
ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.global.tlsVerify.value)
|
||||
if opts.tlsVerify.Present() {
|
||||
ctx.DockerDaemonInsecureSkipTLSVerify = !opts.tlsVerify.Value()
|
||||
}
|
||||
if opts.tlsVerify.present {
|
||||
ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.value)
|
||||
if opts.tlsVerify.Present() {
|
||||
ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.Value())
|
||||
}
|
||||
if opts.credsOption.present && opts.noCreds {
|
||||
if opts.credsOption.Present() && opts.noCreds {
|
||||
return nil, errors.New("creds and no-creds cannot be specified at the same time")
|
||||
}
|
||||
if opts.credsOption.present {
|
||||
if opts.userName.Present() && opts.noCreds {
|
||||
return nil, errors.New("username and no-creds cannot be specified at the same time")
|
||||
}
|
||||
if opts.credsOption.Present() && opts.userName.Present() {
|
||||
return nil, errors.New("creds and username cannot be specified at the same time")
|
||||
}
|
||||
// if any of username or password is present, then both are expected to be present
|
||||
if opts.userName.Present() != opts.password.Present() {
|
||||
if opts.userName.Present() {
|
||||
return nil, errors.New("password must be specified when username is specified")
|
||||
}
|
||||
return nil, errors.New("username must be specified when password is specified")
|
||||
}
|
||||
if opts.credsOption.Present() {
|
||||
var err error
|
||||
ctx.DockerAuthConfig, err = getDockerAuth(opts.credsOption.value)
|
||||
ctx.DockerAuthConfig, err = getDockerAuth(opts.credsOption.Value())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if opts.userName.Present() {
|
||||
ctx.DockerAuthConfig = &types.DockerAuthConfig{
|
||||
Username: opts.userName.Value(),
|
||||
Password: opts.password.Value(),
|
||||
}
|
||||
}
|
||||
if opts.registryToken.Present() {
|
||||
ctx.DockerBearerRegistryToken = opts.registryToken.Value()
|
||||
}
|
||||
if opts.noCreds {
|
||||
ctx.DockerAuthConfig = &types.DockerAuthConfig{}
|
||||
}
|
||||
|
||||
return ctx, nil
|
||||
}
|
||||
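// Illustrative sketch (not part of this diff): the tls-verify precedence that
// newSystemContext applies, reduced to a hypothetical standalone helper. The
// deprecated --tls-verify value is applied first and the per-image flag, when
// present, overrides it; a nil pointer stands in for "flag not set".
func resolveTLSVerifySketch(deprecated, perImage *bool) types.OptionalBool {
	result := types.OptionalBoolUndefined
	if deprecated != nil {
		result = types.NewOptionalBool(!*deprecated) // true means "skip verification"
	}
	if perImage != nil {
		result = types.NewOptionalBool(!*perImage)
	}
	return result
}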
|
||||
// imageDestOptions is a superset of imageOptions specialized for iamge destinations.
|
||||
// imageDestOptions is a superset of imageOptions specialized for image destinations.
|
||||
type imageDestOptions struct {
|
||||
*imageOptions
|
||||
osTreeTmpDir string // A directory to use for OSTree temporary files
|
||||
dirForceCompression bool // Compress layers when saving to the dir: transport
|
||||
ociAcceptUncompressedLayers bool // Whether to accept uncompressed layers in the oci: transport
|
||||
dirForceCompression bool // Compress layers when saving to the dir: transport
|
||||
dirForceDecompression bool // Decompress layers when saving to the dir: transport
|
||||
ociAcceptUncompressedLayers bool // Whether to accept uncompressed layers in the oci: transport
|
||||
compressionFormat string // Format to use for the compression
|
||||
compressionLevel commonFlag.OptionalInt // Level to use for the compression
|
||||
precomputeDigests bool // Precompute digests to dedup layers when saving to the docker: transport
|
||||
}
|
||||
|
||||
// imageDestFlags prepares a collection of CLI flags writing into imageDestOptions, and the managed imageDestOptions structure.
|
||||
func imageDestFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) ([]cli.Flag, *imageDestOptions) {
|
||||
genericFlags, genericOptions := imageFlags(global, shared, flagPrefix, credsOptionAlias)
|
||||
func imageDestFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLSVerify *deprecatedTLSVerifyOption, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageDestOptions) {
|
||||
genericFlags, genericOptions := imageFlags(global, shared, deprecatedTLSVerify, flagPrefix, credsOptionAlias)
|
||||
opts := imageDestOptions{imageOptions: genericOptions}
|
||||
|
||||
return append(genericFlags, []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: flagPrefix + "ostree-tmp-dir",
|
||||
Usage: "`DIRECTORY` to use for OSTree temporary files",
|
||||
Destination: &opts.osTreeTmpDir,
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: flagPrefix + "compress",
|
||||
Usage: "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)",
|
||||
Destination: &opts.dirForceCompression,
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: flagPrefix + "oci-accept-uncompressed-layers",
|
||||
Usage: "Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed)",
|
||||
Destination: &opts.ociAcceptUncompressedLayers,
|
||||
},
|
||||
}...), &opts
|
||||
fs := pflag.FlagSet{}
|
||||
fs.AddFlagSet(&genericFlags)
|
||||
fs.BoolVar(&opts.dirForceCompression, flagPrefix+"compress", false, "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)")
|
||||
fs.BoolVar(&opts.dirForceDecompression, flagPrefix+"decompress", false, "Decompress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)")
|
||||
fs.BoolVar(&opts.ociAcceptUncompressedLayers, flagPrefix+"oci-accept-uncompressed-layers", false, "Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed)")
|
||||
fs.StringVar(&opts.compressionFormat, flagPrefix+"compress-format", "", "`FORMAT` to use for the compression")
|
||||
fs.Var(commonFlag.NewOptionalIntValue(&opts.compressionLevel), flagPrefix+"compress-level", "`LEVEL` to use for the compression")
|
||||
fs.BoolVar(&opts.precomputeDigests, flagPrefix+"precompute-digests", false, "Precompute digests to prevent uploading layers already on the registry using the 'docker' transport.")
|
||||
return fs, &opts
|
||||
}
|
||||
|
||||
// newSystemContext returns a *types.SystemContext corresponding to opts.
|
||||
@@ -190,9 +277,21 @@ func (opts *imageDestOptions) newSystemContext() (*types.SystemContext, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctx.OSTreeTmpDirPath = opts.osTreeTmpDir
|
||||
ctx.DirForceCompress = opts.dirForceCompression
|
||||
ctx.DirForceDecompress = opts.dirForceDecompression
|
||||
ctx.OCIAcceptUncompressedLayers = opts.ociAcceptUncompressedLayers
|
||||
if opts.compressionFormat != "" {
|
||||
cf, err := compression.AlgorithmByName(opts.compressionFormat)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ctx.CompressionFormat = &cf
|
||||
}
|
||||
if opts.compressionLevel.Present() {
|
||||
value := opts.compressionLevel.Value()
|
||||
ctx.CompressionLevel = &value
|
||||
}
|
||||
ctx.DockerRegistryPushPrecomputeDigests = opts.precomputeDigests
|
||||
return ctx, err
|
||||
}
|
||||
|
||||
@@ -221,20 +320,6 @@ func getDockerAuth(creds string) (*types.DockerAuthConfig, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parseImage converts image URL-like string to an initialized handler for that image.
|
||||
// The caller must call .Close() on the returned ImageCloser.
|
||||
func parseImage(ctx context.Context, opts *imageOptions, name string) (types.ImageCloser, error) {
|
||||
ref, err := alltransports.ParseImageName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sys, err := opts.newSystemContext()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ref.NewImage(ctx, sys)
|
||||
}
|
||||
|
||||
// parseImageSource converts image URL-like string to an ImageSource.
|
||||
// The caller must call .Close() on the returned ImageSource.
|
||||
func parseImageSource(ctx context.Context, opts *imageOptions, name string) (types.ImageSource, error) {
|
||||
@@ -248,3 +333,63 @@ func parseImageSource(ctx context.Context, opts *imageOptions, name string) (typ
|
||||
}
|
||||
return ref.NewImageSource(ctx, sys)
|
||||
}
|
||||
|
||||
// parseManifestFormat parses the format parameter for the copy and sync commands.
// It returns the string value to use as the manifest MIME type.
|
||||
func parseManifestFormat(manifestFormat string) (string, error) {
|
||||
switch manifestFormat {
|
||||
case "oci":
|
||||
return imgspecv1.MediaTypeImageManifest, nil
|
||||
case "v2s1":
|
||||
return manifest.DockerV2Schema1SignedMediaType, nil
|
||||
case "v2s2":
|
||||
return manifest.DockerV2Schema2MediaType, nil
|
||||
default:
|
||||
return "", fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci', 'v2s1', or 'v2s2'", manifestFormat)
|
||||
}
|
||||
}
|
||||
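// Illustrative sketch (not part of this diff): translating a user-supplied
// --format value into the MIME type handed on to c/image; "oci" maps to
// imgspecv1.MediaTypeImageManifest ("application/vnd.oci.image.manifest.v1+json").
func exampleManifestFormatSketch() (string, error) {
	return parseManifestFormat("oci")
}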
|
||||
// usageTemplate returns the usage template for skopeo commands
|
||||
// This blocks the displaying of the global options. The main skopeo
|
||||
// command should not use this.
|
||||
const usageTemplate = `Usage:{{if .Runnable}}
|
||||
{{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
|
||||
|
||||
{{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
|
||||
|
||||
Aliases:
|
||||
{{.NameAndAliases}}{{end}}{{if .HasExample}}
|
||||
|
||||
Examples:
|
||||
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
|
||||
|
||||
Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
|
||||
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
|
||||
|
||||
Flags:
|
||||
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
|
||||
{{end}}
|
||||
`
|
||||
|
||||
// adjustUsage uses the usageTemplate to remove the global options from a subcommand's usage output
// and disables the trailing [flags] in the command's use line
|
||||
func adjustUsage(c *cobra.Command) {
|
||||
c.SetUsageTemplate(usageTemplate)
|
||||
c.DisableFlagsInUseLine = true
|
||||
}
|
||||
|
||||
// promptForPassphrase interactively prompts for a passphrase related to privateKeyFile
|
||||
func promptForPassphrase(privateKeyFile string, stdin, stdout *os.File) (string, error) {
|
||||
stdinFd := int(stdin.Fd())
|
||||
if !term.IsTerminal(stdinFd) {
|
||||
return "", fmt.Errorf("Cannot prompt for a passphrase for key %s, standard input is not a TTY", privateKeyFile)
|
||||
}
|
||||
|
||||
fmt.Fprintf(stdout, "Passphrase for key %s: ", privateKeyFile)
|
||||
passphrase, err := term.ReadPassword(stdinFd)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error reading password: %w", err)
|
||||
}
|
||||
fmt.Fprintf(stdout, "\n")
|
||||
return string(passphrase), nil
|
||||
}
|
||||
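// Illustrative sketch (not part of this diff): prompting for a key passphrase
// only when one was not already supplied (e.g. via a passphrase file). The
// wrapper function is hypothetical.
func passphraseForSketch(privateKeyFile, supplied string) (string, error) {
	if supplied != "" {
		return supplied, nil
	}
	return promptForPassphrase(privateKeyFile, os.Stdin, os.Stdout)
}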
|
||||
@@ -1,184 +1,428 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/types"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNoteCloseFailure(t *testing.T) {
|
||||
const description = "description"
|
||||
|
||||
mainErr := errors.New("main")
|
||||
closeErr := errors.New("closing")
|
||||
|
||||
// Main success, closing failed
|
||||
res := noteCloseFailure(nil, description, closeErr)
|
||||
require.NotNil(t, res)
|
||||
assert.Contains(t, res.Error(), description)
|
||||
assert.Contains(t, res.Error(), closeErr.Error())
|
||||
|
||||
// Both main and closing failed
|
||||
res = noteCloseFailure(mainErr, description, closeErr)
|
||||
require.NotNil(t, res)
|
||||
assert.Contains(t, res.Error(), mainErr.Error())
|
||||
assert.Contains(t, res.Error(), description)
|
||||
assert.Contains(t, res.Error(), closeErr.Error())
|
||||
assert.ErrorIs(t, res, mainErr)
|
||||
}
|
||||
|
||||
// fakeGlobalOptions creates globalOptions and sets it according to flags.
|
||||
// NOTE: This is QUITE FAKE; none of the urfave/cli normalization and the like happens.
|
||||
func fakeGlobalOptions(t *testing.T, flags []string) *globalOptions {
|
||||
func fakeGlobalOptions(t *testing.T, flags []string) (*globalOptions, *cobra.Command) {
|
||||
app, opts := createApp()
|
||||
|
||||
flagSet := flag.NewFlagSet(app.Name, flag.ContinueOnError)
|
||||
for _, f := range app.Flags {
|
||||
f.Apply(flagSet)
|
||||
}
|
||||
err := flagSet.Parse(flags)
|
||||
cmd := &cobra.Command{}
|
||||
app.AddCommand(cmd)
|
||||
err := app.ParseFlags(flags)
|
||||
require.NoError(t, err)
|
||||
|
||||
return opts
|
||||
return opts, cmd
|
||||
}
|
||||
|
||||
// fakeImageOptions creates imageOptions and sets it according to globalFlags/cmdFlags.
|
||||
// NOTE: This is QUITE FAKE; none of the urfave/cli normalization and the like happens.
|
||||
func fakeImageOptions(t *testing.T, flagPrefix string, globalFlags []string, cmdFlags []string) *imageOptions {
|
||||
globalOpts := fakeGlobalOptions(t, globalFlags)
|
||||
|
||||
func fakeImageOptions(t *testing.T, flagPrefix string, useDeprecatedTLSVerify bool,
|
||||
globalFlags []string, cmdFlags []string) *imageOptions {
|
||||
globalOpts, cmd := fakeGlobalOptions(t, globalFlags)
|
||||
sharedFlags, sharedOpts := sharedImageFlags()
|
||||
imageFlags, imageOpts := imageFlags(globalOpts, sharedOpts, flagPrefix, "")
|
||||
flagSet := flag.NewFlagSet("fakeImageOptions", flag.ContinueOnError)
|
||||
for _, f := range append(sharedFlags, imageFlags...) {
|
||||
f.Apply(flagSet)
|
||||
var deprecatedTLSVerifyFlag pflag.FlagSet
|
||||
var deprecatedTLSVerifyOpt *deprecatedTLSVerifyOption
|
||||
if useDeprecatedTLSVerify {
|
||||
deprecatedTLSVerifyFlag, deprecatedTLSVerifyOpt = deprecatedTLSVerifyFlags()
|
||||
}
|
||||
err := flagSet.Parse(cmdFlags)
|
||||
imageFlags, imageOpts := imageFlags(globalOpts, sharedOpts, deprecatedTLSVerifyOpt, flagPrefix, "")
|
||||
cmd.Flags().AddFlagSet(&sharedFlags)
|
||||
if useDeprecatedTLSVerify {
|
||||
cmd.Flags().AddFlagSet(&deprecatedTLSVerifyFlag)
|
||||
}
|
||||
cmd.Flags().AddFlagSet(&imageFlags)
|
||||
err := cmd.ParseFlags(cmdFlags)
|
||||
require.NoError(t, err)
|
||||
return imageOpts
|
||||
}
|
||||
|
||||
func TestImageOptionsNewSystemContext(t *testing.T) {
|
||||
// Default state
|
||||
opts := fakeImageOptions(t, "dest-", []string{}, []string{})
|
||||
opts := fakeImageOptions(t, "dest-", true, []string{}, []string{})
|
||||
res, err := opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{}, res)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
}, res)
|
||||
|
||||
// Set everything to non-default values.
|
||||
opts = fakeImageOptions(t, "dest-", []string{
|
||||
opts = fakeImageOptions(t, "dest-", true, []string{
|
||||
"--registries.d", "/srv/registries.d",
|
||||
"--override-arch", "overridden-arch",
|
||||
"--override-os", "overridden-os",
|
||||
"--override-variant", "overridden-variant",
|
||||
"--tmpdir", "/srv",
|
||||
}, []string{
|
||||
"--authfile", "/srv/authfile",
|
||||
"--dest-authfile", "/srv/dest-authfile",
|
||||
"--dest-cert-dir", "/srv/cert-dir",
|
||||
"--dest-shared-blob-dir", "/srv/shared-blob-dir",
|
||||
"--dest-daemon-host", "daemon-host.example.com",
|
||||
"--dest-tls-verify=false",
|
||||
"--dest-creds", "creds-user:creds-password",
|
||||
"--dest-registry-token", "faketoken",
|
||||
})
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
RegistriesDirPath: "/srv/registries.d",
|
||||
AuthFilePath: "/srv/authfile",
|
||||
AuthFilePath: "/srv/dest-authfile",
|
||||
ArchitectureChoice: "overridden-arch",
|
||||
OSChoice: "overridden-os",
|
||||
VariantChoice: "overridden-variant",
|
||||
OCISharedBlobDirPath: "/srv/shared-blob-dir",
|
||||
DockerCertPath: "/srv/cert-dir",
|
||||
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
|
||||
DockerAuthConfig: &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
|
||||
DockerBearerRegistryToken: "faketoken",
|
||||
DockerDaemonCertPath: "/srv/cert-dir",
|
||||
DockerDaemonHost: "daemon-host.example.com",
|
||||
DockerDaemonInsecureSkipTLSVerify: true,
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
BigFilesTemporaryDir: "/srv",
|
||||
}, res)
|
||||
|
||||
// Global/per-command tlsVerify behavior
|
||||
for _, c := range []struct {
|
||||
global, cmd string
|
||||
expectedDocker types.OptionalBool
|
||||
expectedDockerDaemon bool
|
||||
}{
|
||||
{"", "", types.OptionalBoolUndefined, false},
|
||||
{"", "false", types.OptionalBoolTrue, true},
|
||||
{"", "true", types.OptionalBoolFalse, false},
|
||||
{"false", "", types.OptionalBoolTrue, false},
|
||||
{"false", "false", types.OptionalBoolTrue, true},
|
||||
{"false", "true", types.OptionalBoolFalse, false},
|
||||
{"true", "", types.OptionalBoolFalse, false},
|
||||
{"true", "false", types.OptionalBoolTrue, true},
|
||||
{"true", "true", types.OptionalBoolFalse, false},
|
||||
} {
|
||||
globalFlags := []string{}
|
||||
if c.global != "" {
|
||||
globalFlags = append(globalFlags, "--tls-verify="+c.global)
|
||||
}
|
||||
cmdFlags := []string{}
|
||||
if c.cmd != "" {
|
||||
cmdFlags = append(cmdFlags, "--dest-tls-verify="+c.cmd)
|
||||
}
|
||||
opts := fakeImageOptions(t, "dest-", globalFlags, cmdFlags)
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, c.expectedDocker, res.DockerInsecureSkipTLSVerify, "%#v", c)
|
||||
assert.Equal(t, c.expectedDockerDaemon, res.DockerDaemonInsecureSkipTLSVerify, "%#v", c)
|
||||
}
|
||||
// Global/per-command tlsVerify behavior is tested in TestTLSVerifyFlags.
|
||||
|
||||
// Invalid option values
|
||||
opts = fakeImageOptions(t, "dest-", []string{}, []string{"--dest-creds", ""})
|
||||
opts = fakeImageOptions(t, "dest-", true, []string{}, []string{"--dest-creds", ""})
|
||||
_, err = opts.newSystemContext()
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// fakeImageDestOptions creates imageDestOptions and sets it according to globalFlags/cmdFlags.
|
||||
// NOTE: This is QUITE FAKE; none of the urfave/cli normalization and the like happens.
|
||||
func fakeImageDestOptions(t *testing.T, flagPrefix string, globalFlags []string, cmdFlags []string) *imageDestOptions {
|
||||
globalOpts := fakeGlobalOptions(t, globalFlags)
|
||||
|
||||
func fakeImageDestOptions(t *testing.T, flagPrefix string, useDeprecatedTLSVerify bool,
|
||||
globalFlags []string, cmdFlags []string) *imageDestOptions {
|
||||
globalOpts, cmd := fakeGlobalOptions(t, globalFlags)
|
||||
sharedFlags, sharedOpts := sharedImageFlags()
|
||||
imageFlags, imageOpts := imageDestFlags(globalOpts, sharedOpts, flagPrefix, "")
|
||||
flagSet := flag.NewFlagSet("fakeImageDestOptions", flag.ContinueOnError)
|
||||
for _, f := range append(sharedFlags, imageFlags...) {
|
||||
f.Apply(flagSet)
|
||||
var deprecatedTLSVerifyFlag pflag.FlagSet
|
||||
var deprecatedTLSVerifyOpt *deprecatedTLSVerifyOption
|
||||
if useDeprecatedTLSVerify {
|
||||
deprecatedTLSVerifyFlag, deprecatedTLSVerifyOpt = deprecatedTLSVerifyFlags()
|
||||
}
|
||||
err := flagSet.Parse(cmdFlags)
|
||||
imageFlags, imageOpts := imageDestFlags(globalOpts, sharedOpts, deprecatedTLSVerifyOpt, flagPrefix, "")
|
||||
cmd.Flags().AddFlagSet(&sharedFlags)
|
||||
if useDeprecatedTLSVerify {
|
||||
cmd.Flags().AddFlagSet(&deprecatedTLSVerifyFlag)
|
||||
}
|
||||
cmd.Flags().AddFlagSet(&imageFlags)
|
||||
err := cmd.ParseFlags(cmdFlags)
|
||||
require.NoError(t, err)
|
||||
return imageOpts
|
||||
}
|
||||
|
||||
func TestImageDestOptionsNewSystemContext(t *testing.T) {
|
||||
// Default state
|
||||
opts := fakeImageDestOptions(t, "dest-", []string{}, []string{})
|
||||
opts := fakeImageDestOptions(t, "dest-", true, []string{}, []string{})
|
||||
res, err := opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{}, res)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
}, res)
|
||||
|
||||
authFile := "/tmp/auth.json"
|
||||
// Make sure when REGISTRY_AUTH_FILE is set the auth file is used
|
||||
t.Setenv("REGISTRY_AUTH_FILE", authFile)
|
||||
|
||||
// Explicitly set everything to default, except for when the default is “not present”
|
||||
opts = fakeImageDestOptions(t, "dest-", []string{}, []string{
|
||||
opts = fakeImageDestOptions(t, "dest-", true, []string{}, []string{
|
||||
"--dest-compress=false",
|
||||
})
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{}, res)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
AuthFilePath: authFile,
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
}, res)
|
||||
|
||||
// Set everything to non-default values.
|
||||
opts = fakeImageDestOptions(t, "dest-", []string{
|
||||
opts = fakeImageDestOptions(t, "dest-", true, []string{
|
||||
"--registries.d", "/srv/registries.d",
|
||||
"--override-arch", "overridden-arch",
|
||||
"--override-os", "overridden-os",
|
||||
"--override-variant", "overridden-variant",
|
||||
"--tmpdir", "/srv",
|
||||
}, []string{
|
||||
"--authfile", "/srv/authfile",
|
||||
"--dest-cert-dir", "/srv/cert-dir",
|
||||
"--dest-ostree-tmp-dir", "/srv/ostree-tmp-dir",
|
||||
"--dest-shared-blob-dir", "/srv/shared-blob-dir",
|
||||
"--dest-compress=true",
|
||||
"--dest-daemon-host", "daemon-host.example.com",
|
||||
"--dest-tls-verify=false",
|
||||
"--dest-creds", "creds-user:creds-password",
|
||||
"--dest-registry-token", "faketoken",
|
||||
"--dest-precompute-digests=true",
|
||||
})
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
RegistriesDirPath: "/srv/registries.d",
|
||||
AuthFilePath: "/srv/authfile",
|
||||
ArchitectureChoice: "overridden-arch",
|
||||
OSChoice: "overridden-os",
|
||||
OCISharedBlobDirPath: "/srv/shared-blob-dir",
|
||||
DockerCertPath: "/srv/cert-dir",
|
||||
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
|
||||
DockerAuthConfig: &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
|
||||
OSTreeTmpDirPath: "/srv/ostree-tmp-dir",
|
||||
DockerDaemonCertPath: "/srv/cert-dir",
|
||||
DockerDaemonHost: "daemon-host.example.com",
|
||||
DockerDaemonInsecureSkipTLSVerify: true,
|
||||
DirForceCompress: true,
|
||||
RegistriesDirPath: "/srv/registries.d",
|
||||
AuthFilePath: "/srv/authfile",
|
||||
ArchitectureChoice: "overridden-arch",
|
||||
OSChoice: "overridden-os",
|
||||
VariantChoice: "overridden-variant",
|
||||
OCISharedBlobDirPath: "/srv/shared-blob-dir",
|
||||
DockerCertPath: "/srv/cert-dir",
|
||||
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
|
||||
DockerAuthConfig: &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
|
||||
DockerBearerRegistryToken: "faketoken",
|
||||
DockerDaemonCertPath: "/srv/cert-dir",
|
||||
DockerDaemonHost: "daemon-host.example.com",
|
||||
DockerDaemonInsecureSkipTLSVerify: true,
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
DirForceCompress: true,
|
||||
BigFilesTemporaryDir: "/srv",
|
||||
DockerRegistryPushPrecomputeDigests: true,
|
||||
}, res)
|
||||
|
||||
// Global/per-command tlsVerify behavior is tested in TestTLSVerifyFlags.
|
||||
|
||||
// Invalid option values in imageOptions
|
||||
opts = fakeImageDestOptions(t, "dest-", []string{}, []string{"--dest-creds", ""})
|
||||
opts = fakeImageDestOptions(t, "dest-", true, []string{}, []string{"--dest-creds", ""})
|
||||
_, err = opts.newSystemContext()
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// TestImageOptionsUsernamePassword verifies that using the username and password
|
||||
// options works as expected
|
||||
func TestImageOptionsUsernamePassword(t *testing.T) {
|
||||
for _, command := range []struct {
|
||||
commandArgs []string
|
||||
expectedAuthConfig *types.DockerAuthConfig // data to expect, or nil if an error is expected
|
||||
}{
|
||||
// Set only username/password (without --creds), expected to pass
|
||||
{
|
||||
commandArgs: []string{"--dest-username", "foo", "--dest-password", "bar"},
|
||||
expectedAuthConfig: &types.DockerAuthConfig{Username: "foo", Password: "bar"},
|
||||
},
|
||||
// no username but set password, expect error
|
||||
{
|
||||
commandArgs: []string{"--dest-password", "foo"},
|
||||
expectedAuthConfig: nil,
|
||||
},
|
||||
// set username but no password. expected to fail (we currently don't allow a user without password)
|
||||
{
|
||||
commandArgs: []string{"--dest-username", "bar"},
|
||||
expectedAuthConfig: nil,
|
||||
},
|
||||
// set username with --creds, expected to fail
|
||||
{
|
||||
commandArgs: []string{"--dest-username", "bar", "--dest-creds", "hello:world", "--dest-password", "foo"},
|
||||
expectedAuthConfig: nil,
|
||||
},
|
||||
// set username with --no-creds, expected to fail
|
||||
{
|
||||
commandArgs: []string{"--dest-username", "bar", "--dest-no-creds", "--dest-password", "foo"},
|
||||
expectedAuthConfig: nil,
|
||||
},
|
||||
} {
|
||||
opts := fakeImageDestOptions(t, "dest-", true, []string{}, command.commandArgs)
|
||||
// parse the command options
|
||||
res, err := opts.newSystemContext()
|
||||
if command.expectedAuthConfig == nil {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
DockerAuthConfig: command.expectedAuthConfig,
|
||||
}, res)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTLSVerifyFlags(t *testing.T) {
|
||||
type systemContextOpts interface { // Either *imageOptions or *imageDestOptions
|
||||
newSystemContext() (*types.SystemContext, error)
|
||||
}
|
||||
|
||||
for _, creator := range []struct {
|
||||
name string
|
||||
newOpts func(useDeprecatedTLSVerify bool, globalFlags, cmdFlags []string) systemContextOpts
|
||||
}{
|
||||
{
|
||||
"imageFlags",
|
||||
func(useDeprecatedTLSVerify bool, globalFlags, cmdFlags []string) systemContextOpts {
|
||||
return fakeImageOptions(t, "dest-", useDeprecatedTLSVerify, globalFlags, cmdFlags)
|
||||
},
|
||||
},
|
||||
{
|
||||
"imageDestFlags",
|
||||
func(useDeprecatedTLSVerify bool, globalFlags, cmdFlags []string) systemContextOpts {
|
||||
return fakeImageDestOptions(t, "dest-", useDeprecatedTLSVerify, globalFlags, cmdFlags)
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(creator.name, func(t *testing.T) {
|
||||
for _, c := range []struct {
|
||||
global, deprecatedCmd, cmd string
|
||||
expectedDocker types.OptionalBool
|
||||
expectedDockerDaemon bool
|
||||
}{
|
||||
{"", "", "", types.OptionalBoolUndefined, false},
|
||||
{"", "", "false", types.OptionalBoolTrue, true},
|
||||
{"", "", "true", types.OptionalBoolFalse, false},
|
||||
{"", "false", "", types.OptionalBoolTrue, false},
|
||||
{"", "false", "false", types.OptionalBoolTrue, true},
|
||||
{"", "false", "true", types.OptionalBoolFalse, false},
|
||||
{"", "true", "", types.OptionalBoolFalse, false},
|
||||
{"", "true", "false", types.OptionalBoolTrue, true},
|
||||
{"", "true", "true", types.OptionalBoolFalse, false},
|
||||
{"false", "", "", types.OptionalBoolTrue, false},
|
||||
{"false", "", "false", types.OptionalBoolTrue, true},
|
||||
{"false", "", "true", types.OptionalBoolFalse, false},
|
||||
{"false", "false", "", types.OptionalBoolTrue, false},
|
||||
{"false", "false", "false", types.OptionalBoolTrue, true},
|
||||
{"false", "false", "true", types.OptionalBoolFalse, false},
|
||||
{"false", "true", "", types.OptionalBoolFalse, false},
|
||||
{"false", "true", "false", types.OptionalBoolTrue, true},
|
||||
{"false", "true", "true", types.OptionalBoolFalse, false},
|
||||
{"true", "", "", types.OptionalBoolFalse, false},
|
||||
{"true", "", "false", types.OptionalBoolTrue, true},
|
||||
{"true", "", "true", types.OptionalBoolFalse, false},
|
||||
{"true", "false", "", types.OptionalBoolTrue, false},
|
||||
{"true", "false", "false", types.OptionalBoolTrue, true},
|
||||
{"true", "false", "true", types.OptionalBoolFalse, false},
|
||||
{"true", "true", "", types.OptionalBoolFalse, false},
|
||||
{"true", "true", "false", types.OptionalBoolTrue, true},
|
||||
{"true", "true", "true", types.OptionalBoolFalse, false},
|
||||
} {
|
||||
globalFlags := []string{}
|
||||
if c.global != "" {
|
||||
globalFlags = append(globalFlags, "--tls-verify="+c.global)
|
||||
}
|
||||
cmdFlags := []string{}
|
||||
if c.deprecatedCmd != "" {
|
||||
cmdFlags = append(cmdFlags, "--tls-verify="+c.deprecatedCmd)
|
||||
}
|
||||
if c.cmd != "" {
|
||||
cmdFlags = append(cmdFlags, "--dest-tls-verify="+c.cmd)
|
||||
}
|
||||
opts := creator.newOpts(true, globalFlags, cmdFlags)
|
||||
res, err := opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, c.expectedDocker, res.DockerInsecureSkipTLSVerify, "%#v", c)
|
||||
assert.Equal(t, c.expectedDockerDaemon, res.DockerDaemonInsecureSkipTLSVerify, "%#v", c)
|
||||
|
||||
if c.deprecatedCmd == "" { // Test also the behavior when deprecatedTLSFlag is not recognized
|
||||
// Use globalFlags from the previous test
|
||||
cmdFlags := []string{}
|
||||
if c.cmd != "" {
|
||||
cmdFlags = append(cmdFlags, "--dest-tls-verify="+c.cmd)
|
||||
}
|
||||
opts := creator.newOpts(false, globalFlags, cmdFlags)
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, c.expectedDocker, res.DockerInsecureSkipTLSVerify, "%#v", c)
|
||||
assert.Equal(t, c.expectedDockerDaemon, res.DockerDaemonInsecureSkipTLSVerify, "%#v", c)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseManifestFormat(t *testing.T) {
|
||||
for _, testCase := range []struct {
|
||||
formatParam string
|
||||
expectedManifestType string
|
||||
expectErr bool
|
||||
}{
|
||||
{"oci",
|
||||
imgspecv1.MediaTypeImageManifest,
|
||||
false},
|
||||
{"v2s1",
|
||||
manifest.DockerV2Schema1SignedMediaType,
|
||||
false},
|
||||
{"v2s2",
|
||||
manifest.DockerV2Schema2MediaType,
|
||||
false},
|
||||
{"",
|
||||
"",
|
||||
true},
|
||||
{"badValue",
|
||||
"",
|
||||
true},
|
||||
} {
|
||||
manifestType, err := parseManifestFormat(testCase.formatParam)
|
||||
if testCase.expectErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, manifestType, testCase.expectedManifestType)
|
||||
}
|
||||
}
|
||||
|
||||
// since there is a shared authfile image option and a non-shared (prefixed) one, make sure the override logic
|
||||
// works correctly.
|
||||
func TestImageOptionsAuthfileOverride(t *testing.T) {
|
||||
|
||||
for _, testCase := range []struct {
|
||||
flagPrefix string
|
||||
cmdFlags []string
|
||||
expectedAuthfilePath string
|
||||
}{
|
||||
// if there is no prefix, only authfile is allowed.
|
||||
{"",
|
||||
[]string{
|
||||
"--authfile", "/srv/authfile",
|
||||
}, "/srv/authfile"},
|
||||
// if authfile and dest-authfile is provided, dest-authfile wins
|
||||
{"dest-",
|
||||
[]string{
|
||||
"--authfile", "/srv/authfile",
|
||||
"--dest-authfile", "/srv/dest-authfile",
|
||||
}, "/srv/dest-authfile",
|
||||
},
|
||||
// if only the shared authfile is provided, authfile must be present in system context
|
||||
{"dest-",
|
||||
[]string{
|
||||
"--authfile", "/srv/authfile",
|
||||
}, "/srv/authfile",
|
||||
},
|
||||
// if only the dest authfile is provided, dest-authfile must be present in system context
|
||||
{"dest-",
|
||||
[]string{
|
||||
"--dest-authfile", "/srv/dest-authfile",
|
||||
}, "/srv/dest-authfile",
|
||||
},
|
||||
} {
|
||||
opts := fakeImageOptions(t, testCase.flagPrefix, false, []string{}, testCase.cmdFlags)
|
||||
res, err := opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
AuthFilePath: testCase.expectedAuthfilePath,
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
}, res)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,210 +0,0 @@
|
||||
#! /bin/bash
|
||||
|
||||
: ${PROG:=$(basename ${BASH_SOURCE})}
|
||||
|
||||
_complete_() {
|
||||
local options_with_args=$1
|
||||
local boolean_options="$2 -h --help"
|
||||
local transports=$3
|
||||
|
||||
local option_with_args
|
||||
for option_with_args in $options_with_args $transports
|
||||
do
|
||||
if [ "$option_with_args" == "$prev" -o "$option_with_args" == "$cur" ]
|
||||
then
|
||||
return
|
||||
fi
|
||||
done
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
if [ -n "$transports" ]
|
||||
then
|
||||
compopt -o nospace
|
||||
COMPREPLY=( $( compgen -W "$transports" -- "$cur" ) )
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
_skopeo_supported_transports() {
|
||||
local subcommand=$1
|
||||
|
||||
${PROG} $subcommand --help | grep "Supported transports" -A 1 | tail -n 1 | sed -e 's/,/:/g' -e 's/$/:/'
|
||||
}
|
||||
|
||||
_skopeo_copy() {
|
||||
local options_with_args="
|
||||
--authfile
|
||||
--format -f
|
||||
--sign-by
|
||||
--src-creds --screds
|
||||
--src-cert-dir
|
||||
--src-tls-verify
|
||||
--dest-creds --dcreds
|
||||
--dest-cert-dir
|
||||
--dest-ostree-tmp-dir
|
||||
--dest-tls-verify
|
||||
--src-daemon-host
|
||||
--dest-daemon-host
|
||||
"
|
||||
|
||||
local boolean_options="
|
||||
--dest-compress
|
||||
--remove-signatures
|
||||
--src-no-creds
|
||||
--dest-no-creds
|
||||
--dest-oci-accept-uncompressed-layers
|
||||
"
|
||||
|
||||
local transports="
|
||||
$(_skopeo_supported_transports $(echo $FUNCNAME | sed 's/_skopeo_//'))
|
||||
"
|
||||
|
||||
_complete_ "$options_with_args" "$boolean_options" "$transports"
|
||||
}
|
||||
|
||||
_skopeo_inspect() {
|
||||
local options_with_args="
|
||||
--authfile
|
||||
--creds
|
||||
--cert-dir
|
||||
"
|
||||
local boolean_options="
|
||||
--config
|
||||
--raw
|
||||
--tls-verify
|
||||
--no-creds
|
||||
"
|
||||
|
||||
local transports="
|
||||
$(_skopeo_supported_transports $(echo $FUNCNAME | sed 's/_skopeo_//'))
|
||||
"
|
||||
|
||||
_complete_ "$options_with_args" "$boolean_options" "$transports"
|
||||
}
|
||||
|
||||
_skopeo_standalone_sign() {
|
||||
local options_with_args="
|
||||
-o --output
|
||||
"
|
||||
local boolean_options="
|
||||
"
|
||||
_complete_ "$options_with_args" "$boolean_options"
|
||||
}
|
||||
|
||||
_skopeo_standalone_verify() {
|
||||
local options_with_args="
|
||||
"
|
||||
local boolean_options="
|
||||
"
|
||||
_complete_ "$options_with_args" "$boolean_options"
|
||||
}
|
||||
|
||||
_skopeo_manifest_digest() {
|
||||
local options_with_args="
|
||||
"
|
||||
local boolean_options="
|
||||
"
|
||||
_complete_ "$options_with_args" "$boolean_options"
|
||||
}
|
||||
|
||||
_skopeo_delete() {
|
||||
local options_with_args="
|
||||
--authfile
|
||||
--creds
|
||||
--cert-dir
|
||||
"
|
||||
local boolean_options="
|
||||
--tls-verify
|
||||
--no-creds
|
||||
"
|
||||
|
||||
local transports="
|
||||
$(_skopeo_supported_transports $(echo $FUNCNAME | sed 's/_skopeo_//'))
|
||||
"
|
||||
|
||||
_complete_ "$options_with_args" "$boolean_options" "$transports"
|
||||
}
|
||||
|
||||
_skopeo_layers() {
|
||||
local options_with_args="
|
||||
--creds
|
||||
--cert-dir
|
||||
"
|
||||
local boolean_options="
|
||||
--tls-verify
|
||||
"
|
||||
_complete_ "$options_with_args" "$boolean_options"
|
||||
}
|
||||
|
||||
_skopeo_skopeo() {
|
||||
local options_with_args="
|
||||
--policy
|
||||
--registries.d
|
||||
--override-arch
|
||||
--override-os
|
||||
--command-timeout
|
||||
"
|
||||
local boolean_options="
|
||||
--insecure-policy
|
||||
--debug
|
||||
--version -v
|
||||
--help -h
|
||||
"
|
||||
commands=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion )
|
||||
|
||||
case "$prev" in
|
||||
$main_options_with_args_glob )
|
||||
return
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) )
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
_cli_bash_autocomplete() {
|
||||
local cur opts base
|
||||
|
||||
COMPREPLY=()
|
||||
cur="${COMP_WORDS[COMP_CWORD]}"
|
||||
COMPREPLY=()
|
||||
local cur prev words cword
|
||||
|
||||
_get_comp_words_by_ref -n : cur prev words cword
|
||||
|
||||
local command=${PROG} cpos=0
|
||||
local counter=1
|
||||
counter=1
|
||||
while [ $counter -lt $cword ]; do
|
||||
case "${words[$counter]}" in
|
||||
-*)
|
||||
;;
|
||||
*)
|
||||
command=$(echo "${words[$counter]}" | sed 's/-/_/g')
|
||||
cpos=$counter
|
||||
(( cpos++ ))
|
||||
break
|
||||
;;
|
||||
esac
|
||||
(( counter++ ))
|
||||
done
|
||||
|
||||
local completions_func=_skopeo_${command}
|
||||
declare -F $completions_func >/dev/null && $completions_func
|
||||
|
||||
eval "$previous_extglob_setting"
|
||||
return 0
|
||||
}
|
||||
|
||||
complete -F _cli_bash_autocomplete $PROG
|
||||
145
contrib/cirrus/runner.sh
Executable file
@@ -0,0 +1,145 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This script is intended to be executed by automation or humans
|
||||
# under a hack/get_ci_vm.sh context. Using it under any other circumstances
# is unlikely to work.
|
||||
|
||||
set -e
|
||||
|
||||
# BEGIN Global export of all variables
|
||||
set -a
|
||||
|
||||
# Due to differences across platforms and runtime execution environments,
|
||||
# handling of the (otherwise) default shell setup is non-uniform. Rather
|
||||
# than attempt to workaround differences, simply force-load/set required
|
||||
# items every time this library is utilized.
|
||||
USER="$(whoami)"
|
||||
HOME="$(getent passwd $USER | cut -d : -f 6)"
|
||||
# Some platforms set and make this read-only
|
||||
[[ -n "$UID" ]] || \
|
||||
UID=$(getent passwd $USER | cut -d : -f 3)
|
||||
|
||||
if [[ -r "/etc/automation_environment" ]]; then
|
||||
source /etc/automation_environment
|
||||
source $AUTOMATION_LIB_PATH/common_lib.sh
|
||||
else
|
||||
(
|
||||
echo "WARNING: It does not appear that containers/automation was installed."
|
||||
echo " Functionality of most of ${BASH_SOURCE[0]} will be negatively"
|
||||
echo " impacted."
|
||||
) > /dev/stderr
|
||||
fi
|
||||
|
||||
# This is the magic interpreted by the tests to allow modifying local config/services.
|
||||
SKOPEO_CONTAINER_TESTS=1
|
||||
|
||||
PATH=$PATH:$GOPATH/bin
|
||||
|
||||
# END Global export of all variables
|
||||
set +a
|
||||
|
||||
|
||||
_run_setup() {
|
||||
local mnt
|
||||
local errmsg
|
||||
req_env_vars SKOPEO_CIDEV_CONTAINER_FQIN
|
||||
if [[ "$OS_RELEASE_ID" != "fedora" ]]; then
|
||||
die "Unknown/unsupported distro. $OS_REL_VER"
|
||||
fi
|
||||
|
||||
if [[ -r "/.ci_setup_complete" ]]; then
|
||||
warn "Thwarted an attempt to execute setup more than once."
|
||||
return
|
||||
fi
|
||||
|
||||
# VMs come with the distro skopeo package pre-installed
|
||||
dnf erase -y skopeo
|
||||
|
||||
# Required for testing the SIF transport
|
||||
dnf install -y fakeroot squashfs-tools
|
||||
|
||||
msg "Removing systemd-resolved from nsswitch.conf"
|
||||
# /etc/resolv.conf is already set to bypass systemd-resolved
|
||||
sed -i -r -e 's/^(hosts.+)resolve.+dns/\1dns/' /etc/nsswitch.conf
|
||||
|
||||
# A slew of compiled binaries are pre-built and distributed
|
||||
# within the CI/Dev container image, but we want to run
|
||||
# things directly on the host VM. Fortunately they're all
|
||||
# located in the container under /usr/local/bin
|
||||
msg "Accessing contents of $SKOPEO_CIDEV_CONTAINER_FQIN"
|
||||
podman pull --quiet $SKOPEO_CIDEV_CONTAINER_FQIN
|
||||
mnt=$(podman mount $(podman create $SKOPEO_CIDEV_CONTAINER_FQIN))
|
||||
|
||||
# The container and VM images are built in tandem by the same repository
# automation, but the sources are in different directories. It's
|
||||
# possible for a mismatch to happen, but should (hopefully) be unlikely.
|
||||
# Double-check to make sure.
|
||||
if ! fgrep -qx "ID=$OS_RELEASE_ID" $mnt/etc/os-release || \
|
||||
! fgrep -qx "VERSION_ID=$OS_RELEASE_VER" $mnt/etc/os-release; then
|
||||
die "Somehow $SKOPEO_CIDEV_CONTAINER_FQIN is not based on $OS_REL_VER."
|
||||
fi
|
||||
msg "Copying test binaries from $SKOPEO_CIDEV_CONTAINER_FQIN /usr/local/bin/"
|
||||
cp -a "$mnt/usr/local/bin/"* "/usr/local/bin/"
|
||||
msg "Configuring the openshift registry"
|
||||
|
||||
# TODO: Put directory & yaml into more sensible place + update integration tests
|
||||
mkdir -vp /registry
|
||||
cp -a "$mnt/atomic-registry-config.yml" /
|
||||
|
||||
msg "Cleaning up"
|
||||
podman umount --latest
|
||||
podman rm --latest
|
||||
|
||||
# Ensure setup can only run once
|
||||
touch "/.ci_setup_complete"
|
||||
}
|
||||
|
||||
_run_vendor() {
|
||||
make vendor BUILDTAGS="$BUILDTAGS"
|
||||
}
|
||||
|
||||
_run_build() {
|
||||
make bin/skopeo BUILDTAGS="$BUILDTAGS"
|
||||
make install PREFIX=/usr/local
|
||||
}
|
||||
|
||||
_run_cross() {
|
||||
make local-cross BUILDTAGS="$BUILDTAGS"
|
||||
}
|
||||
|
||||
_run_doccheck() {
|
||||
make validate-docs BUILDTAGS="$BUILDTAGS"
|
||||
}
|
||||
|
||||
_run_unit() {
|
||||
make test-unit-local BUILDTAGS="$BUILDTAGS"
|
||||
}
|
||||
|
||||
_run_integration() {
|
||||
# Ensure we start with a clean-slate
|
||||
podman system reset --force
|
||||
|
||||
make test-integration-local BUILDTAGS="$BUILDTAGS"
|
||||
}
|
||||
|
||||
_run_system() {
|
||||
# Ensure we start with a clean-slate
|
||||
podman system reset --force
|
||||
|
||||
# Executes with containers required for testing.
|
||||
make test-system-local BUILDTAGS="$BUILDTAGS"
|
||||
}
|
||||
|
||||
req_env_vars SKOPEO_PATH BUILDTAGS
|
||||
|
||||
handler="_run_${1}"
|
||||
if [ "$(type -t $handler)" != "function" ]; then
|
||||
die "Unknown/Unsupported command-line argument '$1'"
|
||||
fi
|
||||
|
||||
msg "************************************************************"
|
||||
msg "Runner executing $1 on $OS_REL_VER"
|
||||
msg "************************************************************"
|
||||
|
||||
cd "$SKOPEO_PATH"
|
||||
$handler
|
||||
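For orientation, the runner is normally driven with a single verb that selects one of the `_run_*` handlers above; the exports shown here are illustrative, the real values come from the Cirrus CI environment.

```sh
# Illustrative invocation; SKOPEO_PATH and BUILDTAGS normally come from CI.
export SKOPEO_PATH="$GOPATH/src/github.com/containers/skopeo"
export BUILDTAGS="btrfs_noversion"
contrib/cirrus/runner.sh setup        # dispatches to _run_setup
contrib/cirrus/runner.sh build        # dispatches to _run_build
contrib/cirrus/runner.sh integration  # dispatches to _run_integration
```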
@@ -1,60 +0,0 @@
|
||||
% storage.conf(5) Container Storage Configuration File
|
||||
% Dan Walsh
|
||||
% May 2017
|
||||
|
||||
# NAME
|
||||
storage.conf - Syntax of Container Storage configuration file
|
||||
|
||||
# DESCRIPTION
|
||||
The STORAGE configuration file specifies all of the available container storage options
|
||||
for tools using shared container storage.
|
||||
|
||||
# FORMAT
|
||||
The [TOML format][toml] is used as the encoding of the configuration file.
|
||||
Every option and subtable listed here is nested under a global "storage" table.
|
||||
No bare options are used. The format of TOML can be simplified to:
|
||||
|
||||
[table]
|
||||
option = value
|
||||
|
||||
[table.subtable1]
|
||||
option = value
|
||||
|
||||
[table.subtable2]
|
||||
option = value
|
||||
|
||||
## STORAGE TABLE
|
||||
|
||||
The `storage` table supports the following options:
|
||||
|
||||
**graphroot**=""
|
||||
container storage graph dir (default: "/var/lib/containers/storage")
|
||||
Default directory to store all writable content created by container storage programs.
|
||||
|
||||
**runroot**=""
|
||||
container storage run dir (default: "/var/run/containers/storage")
|
||||
Default directory to store all temporary writable content created by container storage programs.
|
||||
|
||||
**driver**=""
|
||||
container storage driver (default is "overlay")
|
||||
Default Copy On Write (COW) container storage driver.
|
||||
|
||||
### STORAGE OPTIONS TABLE
|
||||
|
||||
The `storage.options` table supports the following options:
|
||||
|
||||
**additionalimagestores**=[]
|
||||
Paths to additional container image stores. Usually these are read-only and stored on remote network shares.
|
||||
|
||||
**size**=""
|
||||
Maximum size of a container image. Default is 10GB. This flag can be used to set quota
|
||||
on the size of container images.
|
||||
|
||||
**override_kernel_check**=""
|
||||
Tell storage drivers to ignore kernel version checks. Some storage drivers assume that if a kernel is too
|
||||
old, the driver is not supported. But for kernels that have had the drivers backported, this flag
|
||||
allows users to override the checks.
|
||||
|
||||
# HISTORY
|
||||
May 2017, Originally compiled by Dan Walsh <dwalsh@redhat.com>
|
||||
Format copied from crio.conf man page created by Aleksa Sarai <asarai@suse.de>
|
||||
57
contrib/skopeoimage/README.md
Normal file
@@ -0,0 +1,57 @@
|
||||
<img src="https://cdn.rawgit.com/containers/skopeo/master/docs/skopeo.svg" width="250">
|
||||
|
||||
----
|
||||
|
||||
# skopeoimage
|
||||
|
||||
## Overview
|
||||
|
||||
This directory contains the Containerfiles necessary to create the skopeoimage container
|
||||
images that are housed on quay.io under the skopeo account. All repositories where
|
||||
the images live are public and can be pulled without credentials. These container images are secured and the
|
||||
resulting containers can run safely with privileges within the container.
|
||||
|
||||
The container images are built using the latest Fedora and then Skopeo is installed into them.
|
||||
The PATH in the container images is set to the default PATH provided by Fedora. Also, the
|
||||
ENTRYPOINT and WORKDIR variables are not set within these container images; as such, they
|
||||
default to `/`.
|
||||
|
||||
The container images are:
|
||||
|
||||
* `quay.io/containers/skopeo:v<version>` and `quay.io/skopeo/stable:v<version>` -
|
||||
These images are built daily. They are intended to contain an unchanging
|
||||
and stable version of skopeo. For the most recent `<version>` tags (`vX`,
|
||||
`vX.Y`, and `vX.Y.Z`) the image contents will be updated daily to incorporate
|
||||
(especially) security updates. For build details, please [see the configuration
|
||||
file](stable/Containerfile).
|
||||
* `quay.io/containers/skopeo:latest` and `quay.io/skopeo/stable:latest` -
|
||||
Built daily using the same Containerfile as above. The skopeo version
|
||||
will remain the "latest" available in Fedora; however, the other image
|
||||
contents may vary compared to the version-tagged images.
|
||||
* `quay.io/skopeo/testing:latest` - This image is built daily, using the
|
||||
latest version of Skopeo that is in the Fedora `updates-testing` repository.
The image is built with [the testing Containerfile](testing/Containerfile).
|
||||
* `quay.io/skopeo/upstream:latest` - This image is built daily using the latest
|
||||
code found in this GitHub repository. Due to the image changing frequently,
|
||||
it's not guaranteed to be stable or even executable. The image is built with
|
||||
[the upstream Containerfile](upstream/Containerfile).
|
||||
|
||||
|
||||
## Sample Usage
|
||||
|
||||
Although not required, it is suggested that [Podman](https://github.com/containers/podman) be used with these container images.
|
||||
|
||||
```
|
||||
# Get Help on Skopeo
|
||||
podman run docker://quay.io/skopeo/stable:latest --help
|
||||
|
||||
# Get help on the Skopeo Copy command
|
||||
podman run docker://quay.io/skopeo/stable:latest copy --help
|
||||
|
||||
# Copy the Skopeo container image from quay.io to
|
||||
# a private registry
|
||||
podman run docker://quay.io/skopeo/stable:latest copy docker://quay.io/skopeo/stable docker://registry.internal.company.com/skopeo
|
||||
|
||||
# Inspect the fedora:latest image
|
||||
podman run docker://quay.io/skopeo/stable:latest inspect --config docker://registry.fedoraproject.org/fedora:latest | jq
|
||||
```
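Since the images set `REGISTRY_AUTH_FILE=/tmp/auth.json`, one way to reuse existing host credentials (not shown in the README; the host path is an assumption) is to bind-mount an auth file at that location:

```sh
# Assumes credentials already exist in $XDG_RUNTIME_DIR/containers/auth.json on the host.
podman run --rm -v "$XDG_RUNTIME_DIR/containers/auth.json:/tmp/auth.json:ro" \
    docker://quay.io/skopeo/stable:latest \
    copy docker://quay.io/skopeo/stable docker://registry.internal.company.com/skopeo
```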
|
||||
47
contrib/skopeoimage/stable/Containerfile
Normal file
@@ -0,0 +1,47 @@
|
||||
# stable/Containerfile
|
||||
#
|
||||
# Build a Skopeo container image from the latest
|
||||
# stable version of Skopeo from the Fedora Updates System.
|
||||
# https://bodhi.fedoraproject.org/updates/?search=skopeo
|
||||
# This image can be used to create a secured container
|
||||
# that runs safely with privileges within the container.
|
||||
#
|
||||
FROM registry.fedoraproject.org/fedora:latest
|
||||
|
||||
# Don't include container-selinux and remove
|
||||
# directories used by dnf that are just taking
|
||||
# up space.
|
||||
# TODO: rpm --setcaps... needed due to Fedora (base) image builds
|
||||
# being (maybe still?) affected by
|
||||
# https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
|
||||
RUN dnf -y update && \
|
||||
rpm --setcaps shadow-utils 2>/dev/null && \
|
||||
dnf -y install skopeo fuse-overlayfs \
|
||||
--exclude container-selinux && \
|
||||
dnf clean all && \
|
||||
rm -rf /var/cache /var/log/dnf* /var/log/yum.*
|
||||
|
||||
RUN useradd skopeo && \
|
||||
echo skopeo:100000:65536 > /etc/subuid && \
|
||||
echo skopeo:100000:65536 > /etc/subgid
|
||||
|
||||
# Copy & modify the defaults to provide a reference if runtime changes are needed.
# Changes here are required for running with fuse-overlayfs storage inside a container.
|
||||
RUN sed -e 's|^#mount_program|mount_program|g' \
|
||||
-e '/additionalimage.*/a "/var/lib/shared",' \
|
||||
-e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \
|
||||
/usr/share/containers/storage.conf \
|
||||
> /etc/containers/storage.conf
|
||||
|
||||
# Setup the ability to use additional stores
|
||||
# with this container image.
|
||||
RUN mkdir -p /var/lib/shared/overlay-images \
|
||||
/var/lib/shared/overlay-layers && \
|
||||
touch /var/lib/shared/overlay-images/images.lock && \
|
||||
touch /var/lib/shared/overlay-layers/layers.lock
|
||||
|
||||
# Point to the Authorization file
|
||||
ENV REGISTRY_AUTH_FILE=/tmp/auth.json
|
||||
|
||||
# Set the entrypoint
|
||||
ENTRYPOINT ["/usr/bin/skopeo"]
|
||||
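A quick, hypothetical way to confirm that the `sed` edits above ended up in the built image is to print the rewritten lines from `/etc/containers/storage.conf`:

```sh
# Hypothetical check against the published stable image.
podman run --rm --entrypoint cat quay.io/skopeo/stable:latest \
    /etc/containers/storage.conf | grep -E '^(mount_program|mountopt)'
# Expected output, approximately:
#   mount_program = "/usr/bin/fuse-overlayfs"
#   mountopt = "nodev,fsync=0"
```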
49
contrib/skopeoimage/testing/Containerfile
Normal file
@@ -0,0 +1,49 @@
|
||||
# testing/Containerfile
|
||||
#
|
||||
# Build a Skopeo container image from the latest
|
||||
# version of Skopeo that is in updates-testing
|
||||
# on the Fedora Updates System.
|
||||
# https://bodhi.fedoraproject.org/updates/?search=skopeo
|
||||
# This image can be used to create a secured container
|
||||
# that runs safely with privileges within the container.
|
||||
#
|
||||
FROM registry.fedoraproject.org/fedora:latest
|
||||
|
||||
# Don't include container-selinux and remove
|
||||
# directories used by dnf that are just taking
|
||||
# up space.
|
||||
# TODO: rpm --setcaps... needed due to Fedora (base) image builds
|
||||
# being (maybe still?) affected by
|
||||
# https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
|
||||
RUN dnf -y update && \
|
||||
rpm --setcaps shadow-utils 2>/dev/null && \
|
||||
dnf -y install skopeo fuse-overlayfs \
|
||||
--exclude container-selinux \
|
||||
--enablerepo updates-testing && \
|
||||
dnf clean all && \
|
||||
rm -rf /var/cache /var/log/dnf* /var/log/yum.*
|
||||
|
||||
RUN useradd skopeo && \
|
||||
echo skopeo:100000:65536 > /etc/subuid && \
|
||||
echo skopeo:100000:65536 > /etc/subgid
|
||||
|
||||
# Copy & modify the defaults to provide a reference if runtime changes are needed.
# Changes here are required for running with fuse-overlayfs storage inside a container.
|
||||
RUN sed -e 's|^#mount_program|mount_program|g' \
|
||||
-e '/additionalimage.*/a "/var/lib/shared",' \
|
||||
-e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \
|
||||
/usr/share/containers/storage.conf \
|
||||
> /etc/containers/storage.conf
|
||||
|
||||
# Setup the ability to use additional stores
|
||||
# with this container image.
|
||||
RUN mkdir -p /var/lib/shared/overlay-images \
|
||||
/var/lib/shared/overlay-layers && \
|
||||
touch /var/lib/shared/overlay-images/images.lock && \
|
||||
touch /var/lib/shared/overlay-layers/layers.lock
|
||||
|
||||
# Point to the Authorization file
|
||||
ENV REGISTRY_AUTH_FILE=/tmp/auth.json
|
||||
|
||||
# Set the entrypoint
|
||||
ENTRYPOINT ["/usr/bin/skopeo"]
|
||||
66
contrib/skopeoimage/upstream/Containerfile
Normal file
@@ -0,0 +1,66 @@
|
||||
# upstream/Containerfile
|
||||
#
|
||||
# Build a Skopeo container image from the latest
|
||||
# upstream version of Skopeo on GitHub.
|
||||
# https://github.com/containers/skopeo
|
||||
# This image can be used to create a secured container
|
||||
# that runs safely with privileges within the container.
|
||||
#
|
||||
FROM registry.fedoraproject.org/fedora:latest
|
||||
|
||||
# Don't include container-selinux and remove
|
||||
# directories used by dnf that are just taking
|
||||
# up space.
|
||||
# TODO: rpm --setcaps... needed due to Fedora (base) image builds
|
||||
# being (maybe still?) affected by
|
||||
# https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
|
||||
RUN dnf -y update && \
|
||||
rpm --setcaps shadow-utils 2>/dev/null && \
|
||||
dnf -y --enablerepo updates-testing --exclude container-selinux install \
|
||||
make \
|
||||
golang \
|
||||
git \
|
||||
go-md2man \
|
||||
fuse-overlayfs \
|
||||
fuse3 \
|
||||
containers-common \
|
||||
gpgme-devel \
|
||||
libassuan-devel \
|
||||
btrfs-progs-devel \
|
||||
device-mapper-devel && \
|
||||
mkdir /root/skopeo && \
|
||||
git clone https://github.com/containers/skopeo \
|
||||
/root/skopeo/src/github.com/containers/skopeo && \
|
||||
export GOPATH=/root/skopeo && \
|
||||
cd /root/skopeo/src/github.com/containers/skopeo && \
|
||||
make bin/skopeo && \
|
||||
make PREFIX=/usr install && \
|
||||
rm -rf /root/skopeo/* && \
|
||||
dnf -y remove git golang go-md2man make && \
|
||||
dnf clean all && \
|
||||
rm -rf /var/cache /var/log/dnf* /var/log/yum.*
|
||||
|
||||
RUN useradd skopeo && \
|
||||
echo skopeo:100000:65536 > /etc/subuid && \
|
||||
echo skopeo:100000:65536 > /etc/subgid
|
||||
|
||||
# Copy & modify the defaults to provide a reference if runtime changes are needed.
# Changes here are required for running with fuse-overlayfs storage inside a container.
|
||||
RUN sed -e 's|^#mount_program|mount_program|g' \
|
||||
-e '/additionalimage.*/a "/var/lib/shared",' \
|
||||
-e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \
|
||||
/usr/share/containers/storage.conf \
|
||||
> /etc/containers/storage.conf
|
||||
|
||||
# Setup the ability to use additional stores
|
||||
# with this container image.
|
||||
RUN mkdir -p /var/lib/shared/overlay-images \
|
||||
/var/lib/shared/overlay-layers && \
|
||||
touch /var/lib/shared/overlay-images/images.lock && \
|
||||
touch /var/lib/shared/overlay-layers/layers.lock
|
||||
|
||||
# Point to the Authorization file
|
||||
ENV REGISTRY_AUTH_FILE=/tmp/auth.json
|
||||
|
||||
# Set the entrypoint
|
||||
ENTRYPOINT ["/usr/bin/skopeo"]
|
||||
@@ -1,28 +0,0 @@
|
||||
# storage.conf is the configuration file for all tools
|
||||
# that share the containers/storage libraries
|
||||
# See man 5 containers-storage.conf for more information
|
||||
|
||||
# The "container storage" table contains all of the server options.
|
||||
[storage]
|
||||
|
||||
# Default Storage Driver
|
||||
driver = "overlay"
|
||||
|
||||
# Temporary storage location
|
||||
runroot = "/var/run/containers/storage"
|
||||
|
||||
# Primary read-write location of container storage
|
||||
graphroot = "/var/lib/containers/storage"
|
||||
|
||||
[storage.options]
|
||||
# AdditionalImageStores is used to pass paths to additional read-only image stores
|
||||
# Must be comma separated list.
|
||||
additionalimagestores = [
|
||||
]
|
||||
|
||||
# Size is used to set a maximum size of the container image. Only supported by
|
||||
# certain container storage drivers (currently overlay, zfs, vfs, btrfs)
|
||||
size = ""
|
||||
|
||||
# OverrideKernelCheck tells the driver to ignore kernel checks based on kernel version
|
||||
override_kernel_check = "true"
|
||||
16
default.yaml
@@ -1,19 +1,19 @@
|
||||
# This is a default registries.d configuration file. You may
|
||||
# add to this file or create additional files in registries.d/.
|
||||
#
|
||||
# sigstore: indicates a location that is read and write
|
||||
# sigstore-staging: indicates a location that is only for write
|
||||
# lookaside: indicates a location that is read and write
|
||||
# lookaside-staging: indicates a location that is only for write
|
||||
#
|
||||
# sigstore and sigstore-staging take a value of the following:
|
||||
# sigstore: {schema}://location
|
||||
# lookaside and lookaside-staging take a value of the following:
|
||||
# lookaside: {schema}://location
|
||||
#
|
||||
# For reading signatures, schema may be http, https, or file.
|
||||
# For writing signatures, schema may only be file.
|
||||
|
||||
# This is the default signature write location for docker registries.
|
||||
default-docker:
|
||||
# sigstore: file:///var/lib/atomic/sigstore
|
||||
sigstore-staging: file:///var/lib/atomic/sigstore
|
||||
# lookaside: file:///var/lib/containers/sigstore
|
||||
lookaside-staging: file:///var/lib/containers/sigstore
|
||||
|
||||
# The 'docker' indicator here is the start of the configuration
|
||||
# for docker registries.
|
||||
@@ -21,6 +21,6 @@ default-docker:
|
||||
# docker:
|
||||
#
|
||||
# privateregistry.com:
|
||||
# sigstore: http://privateregistry.com/sigstore/
|
||||
# sigstore-staging: /mnt/nfs/privateregistry/sigstore
|
||||
# lookaside: http://privateregistry.com/sigstore/
|
||||
# lookaside-staging: /mnt/nfs/privateregistry/sigstore
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
skopeo\-copy - Copy an image (manifest, filesystem layers, signatures) from one location to another.
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo copy** [**--sign-by=**_key-ID_] _source-image destination-image_
|
||||
**skopeo copy** [*options*] _source-image_ _destination-image_
|
||||
|
||||
## DESCRIPTION
|
||||
Copy an image (manifest, filesystem layers, signatures) from one location to another.
|
||||
@@ -15,51 +15,209 @@ Uses the system's trust policy to validate images, rejects images not trusted by
|
||||
|
||||
_destination-image_ use the "image name" format described above
|
||||
|
||||
_source-image_ and _destination-image_ are interpreted completely independently; e.g. the destination name does not
|
||||
automatically inherit any parts of the source name.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--additional-tag**=_strings_
|
||||
|
||||
Additional tags (supports docker-archive).
|
||||
|
||||
**--all**, **-a**
|
||||
|
||||
If _source-image_ refers to a list of images, instead of copying just the image which matches the current OS and
|
||||
architecture (subject to the use of the global --override-os, --override-arch and --override-variant options), attempt to copy all of
|
||||
the images in the list, and the list itself.
|
||||
|
||||
**--authfile** _path_
|
||||
|
||||
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
|
||||
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
|
||||
**--format, -f** _manifest-type_ Manifest type (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)
|
||||
Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
|
||||
environment variable. `export REGISTRY_AUTH_FILE=path`
|
||||
|
||||
**--quiet, -q** suppress output information when copying images
|
||||
**--src-authfile** _path_
|
||||
|
||||
**--remove-signatures** do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.
|
||||
Path of the authentication file for the source registry. Uses path given by `--authfile`, if not provided.
|
||||
|
||||
**--sign-by=**_key-id_ add a signature using that key ID for an image name corresponding to _destination-image_
|
||||
**--dest-authfile** _path_
|
||||
|
||||
**--src-creds** _username[:password]_ for accessing the source registry
|
||||
Path of the authentication file for the destination registry. Uses path given by `--authfile`, if not provided.
|
||||
|
||||
**--dest-compress** _bool-value_ Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)
|
||||
**--dest-shared-blob-dir** _directory_
|
||||
|
||||
**--dest-oci-accept-uncompressed-layers** _bool-value_ Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed)
|
||||
Directory to use to share blobs across OCI repositories.
|
||||
|
||||
**--dest-creds** _username[:password]_ for accessing the destination registry
|
||||
**--digestfile** _path_
|
||||
|
||||
**--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon
|
||||
After copying the image, write the digest of the resulting image to the file.
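As a quick illustration (the image name and paths here are placeholders, not from the man page):

```sh
skopeo copy --digestfile /tmp/alpine.digest \
    docker://docker.io/library/alpine:latest dir:/tmp/alpine
cat /tmp/alpine.digest   # prints a digest such as sha256:<64 hex characters>
```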
|
||||
|
||||
**--src-no-creds** _bool-value_ Access the registry anonymously.
|
||||
**--preserve-digests**
|
||||
|
||||
**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container source registry or daemon (defaults to true)
|
||||
Preserve the digests during copying. Fail if the digest cannot be preserved.
|
||||
|
||||
**--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon
|
||||
**--encrypt-layer** _ints_
|
||||
|
||||
**--dest-no-creds** _bool-value_ Access the registry anonymously.
|
||||
*Experimental*: the 0-indexed layer indices to encrypt, with support for negative indexing (e.g. 0 is the first layer, -1 is the last layer)
|
||||
|
||||
**--dest-ostree-tmp-dir** _path_ Directory to use for OSTree temporary files.
|
||||
**--format**, **-f** _manifest-type_
|
||||
|
||||
**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container destination registry or daemon (defaults to true)
|
||||
Manifest type (oci, v2s1, or v2s2) to use in the destination (default is the manifest type of the source, with fallbacks)
|
||||
|
||||
**--src-daemon-host** _host_ Copy from docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).
|
||||
**--help**, **-h**
|
||||
|
||||
**--dest-daemon-host** _host_ Copy to docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).
|
||||
Print usage statement
|
||||
|
||||
**--multi-arch** _option_
|
||||
|
||||
Control what is copied if _source-image_ refers to a multi-architecture image. Default is system.
|
||||
|
||||
Options:
|
||||
- system: Copy only the image that matches the system architecture
|
||||
- all: Copy the full multi-architecture image
|
||||
- index-only: Copy only the index
|
||||
|
||||
The index-only option usually fails unless the referenced per-architecture images are already present in the destination, or the target registry supports sparse indexes.
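For example, copying every architecture of a manifest list along with the list itself could look like this (the registry names are placeholders):

```sh
skopeo copy --multi-arch all \
    docker://docker.io/library/alpine:latest \
    docker://registry.example.com/mirror/alpine:latest
```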
|
||||
|
||||
**--quiet**, **-q**
|
||||
|
||||
Suppress output information when copying images.
|
||||
|
||||
**--remove-signatures**
|
||||
|
||||
Do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.
|
||||
|
||||
**--sign-by** _key-id_
|
||||
|
||||
Add a “simple signing” signature using that key ID for an image name corresponding to _destination-image_
|
||||
|
||||
**--sign-by-sigstore-private-key** _path_
|
||||
|
||||
Add a sigstore signature using a private key at _path_ for an image name corresponding to _destination-image_
|
||||
|
||||
**--sign-passphrase-file** _path_
|
||||
|
||||
The passphrase to use when signing with `--sign-by` or `--sign-by-sigstore-private-key`. Only the first line will be read. A passphrase stored in a file is of questionable security if other users can read this file. Do not use this option if at all avoidable.
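A hypothetical sigstore-signed copy combining the two options above (the key and passphrase paths are placeholders):

```sh
skopeo copy \
    --sign-by-sigstore-private-key ./sigstore.key \
    --sign-passphrase-file ./sigstore-passphrase.txt \
    docker://registry.example.com/app:latest \
    docker://registry.example.com/app:signed
```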
|
||||
|
||||
**--sign-identity** _reference_
|
||||
|
||||
The identity to use when signing the image. The identity must be a fully specified docker reference. If the identity is not specified, the target docker reference will be used.
|
||||
|
||||
**--src-shared-blob-dir** _directory_
|
||||
|
||||
Directory to use to share blobs across OCI repositories.
|
||||
|
||||
**--encryption-key** _protocol:keyfile_
|
||||
|
||||
Specifies the encryption protocol, which can be JWE (RFC7516), PGP (RFC4880), or PKCS7 (RFC2315), and the key material required for image encryption. For instance, jwe:/path/to/key.pem, pgp:admin@example.com, or pkcs7:/path/to/x509-file.
|
||||
|
||||
**--decryption-key** _key[:passphrase]_
|
||||
|
||||
Key to be used for decryption of images. Key can point to keys and/or certificates. Decryption will be tried with all keys. If the key is protected by a passphrase, it is required to be passed in the argument and omitted otherwise.
|
||||
|
||||
**--src-creds** _username[:password]_
|
||||
|
||||
Credentials for accessing the source registry.
|
||||
|
||||
**--dest-compress**
|
||||
|
||||
Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source).
|
||||
|
||||
**--dest-decompress**
|
||||
|
||||
Decompress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source).
|
||||
|
||||
**--dest-oci-accept-uncompressed-layers**
|
||||
|
||||
Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed).
|
||||
|
||||
**--dest-creds** _username[:password]_
|
||||
|
||||
Credentials for accessing the destination registry.
|
||||
|
||||
**--src-cert-dir** _path_
|
||||
|
||||
Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon.
|
||||
|
||||
**--src-no-creds**
|
||||
|
||||
Access the registry anonymously.
|
||||
|
||||
**--src-tls-verify**=_bool_
|
||||
|
||||
Require HTTPS and verify certificates when talking to the container source registry or daemon. Defaults to the source registry setting.
|
||||
|
||||
**--dest-cert-dir** _path_
|
||||
|
||||
Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon.
|
||||
|
||||
**--dest-no-creds**
|
||||
|
||||
Access the registry anonymously.
|
||||
|
||||
**--dest-tls-verify**=_bool_
|
||||
|
||||
Require HTTPS and verify certificates when talking to the container destination registry or daemon. Defaults to the destination registry setting.
|
||||
|
||||
**--src-daemon-host** _host_
|
||||
|
||||
Copy from docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).
|
||||
|
||||
**--dest-daemon-host** _host_
|
||||
|
||||
Copy to docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).
|
||||
|
||||
Existing signatures, if any, are preserved as well.
|
||||
|
||||
**--dest-compress-format** _format_
|
||||
|
||||
Specifies the compression format to use. Supported values are: `gzip` and `zstd`.
|
||||
|
||||
**--dest-compress-level** _format_
|
||||
|
||||
Specifies the compression level to use. The value is specific to the compression algorithm used, e.g. for zstd the accepted values are in the range 1-20 (inclusive), while for gzip it is 1-9 (inclusive).
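For instance, recompressing layers as zstd at a chosen level while writing to a local directory might look like this (the destination path is illustrative):

```sh
skopeo copy --dest-compress --dest-compress-format zstd --dest-compress-level 10 \
    docker://docker.io/library/alpine:latest dir:/tmp/alpine-zstd
```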
|
||||
|
||||
**--src-registry-token** _token_
|
||||
|
||||
Bearer token for accessing the source registry.
|
||||
|
||||
**--dest-registry-token** _token_
|
||||
|
||||
Bearer token for accessing the destination registry.
|
||||
|
||||
**--dest-precompute-digests**
|
||||
|
||||
Precompute digests to ensure that layers which already exist on the destination registry are not uploaded. Layers with initially unknown digests (e.g. when compressing "on the fly") will be temporarily streamed to disk.
|
||||
|
||||
**--retry-times**
|
||||
|
||||
The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.
|
||||
|
||||
**--src-username**
|
||||
|
||||
The username to access the source registry.
|
||||
|
||||
**--src-password**
|
||||
|
||||
The password to access the source registry.
|
||||
|
||||
**--dest-username**
|
||||
|
||||
The username to access the destination registry.
|
||||
|
||||
**--dest-password**
|
||||
|
||||
The password to access the destination registry.
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
To just copy an image from one registry to another:
|
||||
```sh
|
||||
$ skopeo copy docker://quay.io/skopeo/stable:latest docker://registry.example.com/skopeo:latest
|
||||
```
|
||||
|
||||
To copy the layers of the docker.io busybox image to a local directory:
|
||||
```sh
|
||||
$ mkdir -p /var/lib/images/busybox
|
||||
@@ -73,13 +231,44 @@ $ ls /var/lib/images/busybox/*
|
||||
To copy and sign an image:
|
||||
|
||||
```sh
|
||||
$ skopeo copy --sign-by dev@example.com atomic:example/busybox:streaming atomic:example/busybox:gold
|
||||
# skopeo copy --sign-by dev@example.com containers-storage:example/busybox:streaming docker://example/busybox:gold
|
||||
```
|
||||
|
||||
To encrypt an image:
|
||||
```sh
|
||||
skopeo copy docker://docker.io/library/nginx:1.17.8 oci:local_nginx:1.17.8
|
||||
|
||||
openssl genrsa -out private.key 1024
|
||||
openssl rsa -in private.key -pubout > public.key
|
||||
|
||||
skopeo copy --encryption-key jwe:./public.key oci:local_nginx:1.17.8 oci:try-encrypt:encrypted
|
||||
```
|
||||
|
||||
To decrypt an image:
|
||||
```sh
|
||||
skopeo copy --decryption-key ./private.key oci:try-encrypt:encrypted oci:try-decrypt:decrypted
|
||||
```
|
||||
|
||||
To copy encrypted image without decryption:
|
||||
```sh
|
||||
skopeo copy oci:try-encrypt:encrypted oci:try-encrypt-copy:encrypted
|
||||
```
|
||||
|
||||
To decrypt an image that requires more than one key:
|
||||
```sh
|
||||
skopeo copy --decryption-key ./private1.key --decryption-key ./private2.key --decryption-key ./private3.key oci:try-encrypt:encrypted oci:try-decrypt:decrypted
|
||||
```
|
||||
|
||||
Container images can also be partially encrypted by specifying the index of the layer. Layers are addressed by 0-indexed indices, with support for negative indexing; i.e. 0 is the first layer and -1 is the last layer.
|
||||
|
||||
Say that, of the 3 layers the image `docker.io/library/nginx:1.17.8` is made up of, we only want to encrypt the 2nd layer:
|
||||
```sh
|
||||
skopeo copy --encryption-key jwe:./public.key --encrypt-layer 1 oci:local_nginx:1.17.8 oci:try-encrypt:encrypted
|
||||
```
|
||||
|
||||
## SEE ALSO
|
||||
skopeo(1), podman-login(1), docker-login(1)
|
||||
skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5), containers-policy.json(5), containers-transports(5), containers-signature(5)
|
||||
|
||||
## AUTHORS
|
||||
|
||||
Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
|
||||
|
||||
|
||||
@@ -1,52 +1,99 @@
|
||||
% skopeo-delete(1)
|
||||
|
||||
## NAME
|
||||
skopeo\-delete - Mark _image-name_ for deletion.
|
||||
skopeo\-delete - Mark the _image-name_ for later deletion by the registry's garbage collector.
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo delete** _image-name_
|
||||
**skopeo delete** [*options*] _image-name_
|
||||
|
||||
Mark _image-name_ for deletion. To release the allocated disk space, you must login to the container registry server and execute the container registry garbage collector. E.g.,
|
||||
## DESCRIPTION
|
||||
|
||||
Mark _image-name_ for deletion.
|
||||
The effect of this is registry-specific; many registries don’t support this operation, or don’t allow it in some circumstances / configurations.
|
||||
|
||||
**WARNING**: If _image-name_ contains a digest, this affects the referenced manifest, and may delete all tags (within the current repository?) pointing to that manifest.
|
||||
|
||||
**WARNING**: If _image-name_ contains a tag (but not a digest), in the current version of Skopeo this resolves the tag into a digest, and then deletes the manifest by digest, as described above (possibly deleting all tags pointing to that manifest, not just the provided tag). This behavior may change in the future.
|
||||
|
||||
|
||||
When using the github.com/distribution/distribution registry server:
|
||||
To release the allocated disk space, you must login to the container registry server and execute the container registry garbage collector. E.g.,
|
||||
|
||||
```
|
||||
/usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
|
||||
|
||||
```
|
||||
Note: sometimes the config.yml is stored in /etc/docker/registry/config.yml
|
||||
|
||||
If you are running the container registry inside of a container you would execute something like:
|
||||
|
||||
$ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
|
||||
|
||||
```
|
||||
$ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
|
||||
```
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--authfile** _path_
|
||||
|
||||
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
|
||||
**--creds** _username[:password]_ for accessing the registry
|
||||
**--creds** _username[:password]_
|
||||
|
||||
**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry
|
||||
Credentials for accessing the registry.
|
||||
|
||||
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
|
||||
**--cert-dir** _path_
|
||||
|
||||
**--no-creds** _bool-value_ Access the registry anonymously.
|
||||
Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry.
|
||||
|
||||
**--daemon-host** _host_
|
||||
|
||||
Use docker daemon host at _host_ (`docker-daemon:` transport only)
|
||||
|
||||
**--help**, **-h**
|
||||
|
||||
Print usage statement
|
||||
|
||||
**--no-creds**
|
||||
|
||||
Access the registry anonymously.
|
||||
|
||||
Additionally, the registry must allow deletions by setting `REGISTRY_STORAGE_DELETE_ENABLED=true` for the registry daemon.
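Against a local github.com/distribution/distribution registry, the whole flow might look like this (the container name, image, and port are illustrative):

```sh
# Run a registry that permits deletions, push an image, then delete it.
podman run -d -p 5000:5000 --name registry \
    -e REGISTRY_STORAGE_DELETE_ENABLED=true docker.io/library/registry:2
skopeo copy --dest-tls-verify=false \
    docker://docker.io/library/alpine:latest docker://localhost:5000/example/alpine:latest
skopeo delete --tls-verify=false docker://localhost:5000/example/alpine:latest
```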
|
||||
|
||||
**--registry-token** _token_
|
||||
|
||||
Bearer token for accessing the registry.
|
||||
|
||||
**--retry-times**
|
||||
|
||||
The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.
|
||||
|
||||
**--shared-blob-dir** _directory_
|
||||
|
||||
Directory to use to share blobs across OCI repositories.
|
||||
|
||||
**--tls-verify**=_bool_
|
||||
|
||||
Require HTTPS and verify certificates when talking to the container registry or daemon. Defaults to the registry.conf setting.
|
||||
|
||||
**--username**
|
||||
|
||||
The username to access the registry.
|
||||
|
||||
**--password**
|
||||
|
||||
The password to access the registry.
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
Mark image example/pause for deletion from the registry.example.com registry:
|
||||
```sh
|
||||
$ skopeo delete --force docker://registry.example.com/example/pause:latest
|
||||
$ skopeo delete docker://registry.example.com/example/pause:latest
|
||||
```
|
||||
See above for additional details on using the command **delete**.
|
||||
|
||||
|
||||
## SEE ALSO
|
||||
skopeo(1), podman-login(1), docker-login(1)
|
||||
skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5)
|
||||
|
||||
## AUTHORS
|
||||
|
||||
Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
|
||||
|
||||
|
||||
@@ -1,37 +1,88 @@
|
||||
% skopeo-inspect(1)
|
||||
|
||||
## NAME
|
||||
skopeo\-inspect - Return low-level information about _image-name_ in a registry
|
||||
skopeo\-inspect - Return low-level information about _image-name_ in a registry.
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo inspect** [**--raw**] [**--config**] _image-name_
|
||||
**skopeo inspect** [*options*] _image-name_
|
||||
|
||||
Return low-level information about _image-name_ in a registry
|
||||
## DESCRIPTION
|
||||
|
||||
**--raw** output raw manifest, default is to format in JSON
|
||||
Return low-level information about _image-name_ in a registry.
|
||||
See [skopeo(1)](skopeo.1.md) for the format of _image-name_.
|
||||
|
||||
_image-name_ name of image to retrieve information about
|
||||
The default output includes data from various sources: user input (**Name**), the remote repository, if any (**RepoTags**), the top-level manifest (**Digest**),
|
||||
and a per-architecture/OS image matching the current run-time environment (most other values).
|
||||
To see values for a different architecture/OS, use the **--override-os** / **--override-arch** options documented in [skopeo(1)](skopeo.1.md).
|
||||
|
||||
**--config** output configuration in OCI format, default is to format in JSON
|
||||
## OPTIONS
|
||||
|
||||
_image-name_ name of image to retrieve configuration for
|
||||
**--authfile** _path_
|
||||
|
||||
**--config** **--raw** output configuration in raw format
|
||||
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
|
||||
_image-name_ name of image to retrieve configuration for
|
||||
**--cert-dir** _path_
|
||||
|
||||
**--authfile** _path_
|
||||
Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry.
|
||||
|
||||
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
**--config**
|
||||
|
||||
**--creds** _username[:password]_ for accessing the registry
|
||||
Output the configuration in OCI format; the default is to output in JSON format.
|
||||
|
||||
**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry
|
||||
**--creds** _username[:password]_
|
||||
|
||||
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
|
||||
Username and password for accessing the registry.
|
||||
|
||||
**--no-creds** _bool-value_ Access the registry anonymously.
|
||||
**--daemon-host** _host_
|
||||
|
||||
Use docker daemon host at _host_ (`docker-daemon:` transport only)
|
||||
|
||||
**--format**, **-f**=*format*
|
||||
|
||||
Format the output using the given Go template.
|
||||
The keys of the returned JSON can be used as the values for the --format flag (see examples below).
|
||||
|
||||
**--help**, **-h**
|
||||
|
||||
Print usage statement
|
||||
|
||||
**--no-creds**
|
||||
|
||||
Access the registry anonymously.
|
||||
|
||||
**--raw**
|
||||
|
||||
Output raw manifest or config data depending on --config option.
|
||||
The --format option is not supported with --raw option.
|
||||
|
||||
**--registry-token** _Bearer token_
|
||||
|
||||
Registry token for accessing the registry.
|
||||
|
||||
**--retry-times**
|
||||
|
||||
The number of times to retry; retry wait time will be exponentially increased based on the number of failed attempts.
|
||||
|
||||
**--shared-blob-dir** _directory_
|
||||
|
||||
Directory to use to share blobs across OCI repositories.
|
||||
|
||||
**--tls-verify**=_bool_
|
||||
|
||||
Require HTTPS and verify certificates when talking to the container registry or daemon. Defaults to the registry.conf setting.
|
||||
|
||||
**--username**
|
||||
|
||||
The username to access the registry.
|
||||
|
||||
**--password**
|
||||
|
||||
The password to access the registry.
|
||||
|
||||
**--no-tags**, **-n**
|
||||
|
||||
Do not list the available tags from the repository in the output. When `true`, the `RepoTags` array will be empty. Defaults to `false`, which includes all available tags.
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
@@ -42,14 +93,14 @@ $ skopeo inspect docker://docker.io/fedora
|
||||
"Name": "docker.io/library/fedora",
|
||||
"Digest": "sha256:a97914edb6ba15deb5c5acf87bd6bd5b6b0408c96f48a5cbd450b5b04509bb7d",
|
||||
"RepoTags": [
|
||||
"20",
|
||||
"21",
|
||||
"22",
|
||||
"23",
|
||||
"24",
|
||||
"heisenbug",
|
||||
"latest",
|
||||
"rawhide"
|
||||
"20",
|
||||
"21",
|
||||
"22",
|
||||
"23",
|
||||
"24",
|
||||
"heisenbug",
|
||||
"latest",
|
||||
"rawhide"
|
||||
],
|
||||
"Created": "2016-06-20T19:33:43.220526898Z",
|
||||
"DockerVersion": "1.10.3",
|
||||
@@ -57,15 +108,60 @@ $ skopeo inspect docker://docker.io/fedora
|
||||
"Architecture": "amd64",
|
||||
"Os": "linux",
|
||||
"Layers": [
|
||||
"sha256:7c91a140e7a1025c3bc3aace4c80c0d9933ac4ee24b8630a6b0b5d8b9ce6b9d4"
|
||||
"sha256:7c91a140e7a1025c3bc3aace4c80c0d9933ac4ee24b8630a6b0b5d8b9ce6b9d4"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
To inspect python from the docker.io registry and not show the available tags:
|
||||
```sh
|
||||
$ skopeo inspect --no-tags docker://docker.io/library/python
|
||||
{
|
||||
"Name": "docker.io/library/python",
|
||||
"Digest": "sha256:5ca194a80ddff913ea49c8154f38da66a41d2b73028c5cf7e46bc3c1d6fda572",
|
||||
"RepoTags": [],
|
||||
"Created": "2021-10-05T23:40:54.936108045Z",
|
||||
"DockerVersion": "20.10.7",
|
||||
"Labels": null,
|
||||
"Architecture": "amd64",
|
||||
"Os": "linux",
|
||||
"Layers": [
|
||||
"sha256:df5590a8898bedd76f02205dc8caa5cc9863267dbcd8aac038bcd212688c1cc7",
|
||||
"sha256:705bb4cb554eb7751fd21a994f6f32aee582fbe5ea43037db6c43d321763992b",
|
||||
"sha256:519df5fceacdeaadeec563397b1d9f4d7c29c9f6eff879739cab6f0c144f49e1",
|
||||
"sha256:ccc287cbeddc96a0772397ca00ec85482a7b7f9a9fac643bfddd87b932f743db",
|
||||
"sha256:e3f8e6af58ed3a502f0c3c15dce636d9d362a742eb5b67770d0cfcb72f3a9884",
|
||||
"sha256:aebed27b2d86a5a3a2cbe186247911047a7e432b9d17daad8f226597c0ea4276",
|
||||
"sha256:54c32182bdcc3041bf64077428467109a70115888d03f7757dcf614ff6d95ebe",
|
||||
"sha256:cc8b7caedab13af07adf4836e13af2d4e9e54d794129b0fd4c83ece6b1112e86",
|
||||
"sha256:462c3718af1d5cdc050cfba102d06c26f78fe3b738ce2ca2eb248034b1738945"
|
||||
],
|
||||
"Env": [
|
||||
"PATH=/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
|
||||
"LANG=C.UTF-8",
|
||||
"GPG_KEY=A035C8C19219BA821ECEA86B64E628F8D684696D",
|
||||
"PYTHON_VERSION=3.10.0",
|
||||
"PYTHON_PIP_VERSION=21.2.4",
|
||||
"PYTHON_SETUPTOOLS_VERSION=57.5.0",
|
||||
"PYTHON_GET_PIP_URL=https://github.com/pypa/get-pip/raw/d781367b97acf0ece7e9e304bf281e99b618bf10/public/get-pip.py",
|
||||
"PYTHON_GET_PIP_SHA256=01249aa3e58ffb3e1686b7141b4e9aac4d398ef4ac3012ed9dff8dd9f685ffe0"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
```
|
||||
$ /bin/skopeo inspect --config docker://registry.fedoraproject.org/fedora --format "{{ .Architecture }}"
|
||||
amd64
|
||||
```
|
||||
|
||||
```
|
||||
$ /bin/skopeo inspect --format '{{ .Env }}' docker://registry.access.redhat.com/ubi8
|
||||
[PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin container=oci]
|
||||
```
|
||||
|
||||
# SEE ALSO
|
||||
skopeo(1), podman-login(1), docker-login(1)
|
||||
skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5)
|
||||
|
||||
## AUTHORS
|
||||
|
||||
Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
|
||||
|
||||
|
||||
171
docs/skopeo-list-tags.1.md
Normal file
@@ -0,0 +1,171 @@
|
||||
% skopeo-list-tags(1)
|
||||
|
||||
## NAME
|
||||
skopeo\-list\-tags - List image names in a transport-specific collection of images.
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo list-tags** [*options*] _source-image_
|
||||
|
||||
Return a list of tags from _source-image_ in a registry or a local docker-archive file.
|
||||
|
||||
_source-image_ name of the repository to retrieve a tag listing from or a local docker-archive file.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--authfile** _path_
|
||||
|
||||
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
|
||||
**--creds** _username[:password]_ for accessing the registry.
|
||||
|
||||
**--cert-dir** _path_
|
||||
|
||||
Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry.
|
||||
|
||||
**--help**, **-h**
|
||||
|
||||
Print usage statement
|
||||
|
||||
**--no-creds**
|
||||
|
||||
Access the registry anonymously.
|
||||
|
||||
**--registry-token** _Bearer token_
|
||||
|
||||
Bearer token for accessing the registry.
|
||||
|
||||
**--retry-times**
|
||||
|
||||
The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.
|
||||
|
||||
**--tls-verify**=_bool_
|
||||
|
||||
Require HTTPS and verify certificates when talking to the container registry or daemon. Defaults to the registry.conf setting.
|
||||
|
||||
**--username**
|
||||
|
||||
The username to access the registry.
|
||||
|
||||
**--password**
|
||||
|
||||
The password to access the registry.
|
||||
|
||||
## REPOSITORY NAMES
|
||||
|
||||
Repository names are transport-specific references as each transport may have its own concept of a "repository" and "tags".
|
||||
|
||||
This command refers to repositories using a _transport_`:`_details_ format. The following formats are supported:
|
||||
|
||||
**docker://**_docker-repository-reference_
|
||||
A repository in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in either `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(skopeo login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.
|
||||
A _docker-repository-reference_ is of the form: **registryhost:port/repositoryname** which is similar to an _image-reference_ but with no tag or digest allowed as the last component (e.g. no `:latest` or `@sha256:xyz`)
|
||||
|
||||
Examples of valid docker-repository-references:
|
||||
"docker.io/myuser/myrepo"
|
||||
"docker.io/nginx"
|
||||
"docker.io/library/fedora"
|
||||
"localhost:5000/myrepository"
|
||||
|
||||
Examples of invalid references:
|
||||
"docker.io/nginx:latest"
|
||||
"docker.io/myuser/myimage:v1.0"
|
||||
"docker.io/myuser/myimage@sha256:f48c4cc192f4c3c6a069cb5cca6d0a9e34d6076ba7c214fd0cc3ca60e0af76bb"
|
||||
|
||||
**docker-archive:path[:docker-reference]**
More than one image may be stored in a `docker save`-formatted file.
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
### Docker Transport
|
||||
To get the list of tags in the "fedora" repository from the docker.io registry (the repository name expands to "library/fedora" per docker transport canonical form):
|
||||
```sh
|
||||
$ skopeo list-tags docker://docker.io/fedora
|
||||
{
|
||||
"Repository": "docker.io/library/fedora",
|
||||
"Tags": [
|
||||
"20",
|
||||
"21",
|
||||
"22",
|
||||
"23",
|
||||
"24",
|
||||
"25",
|
||||
"26-modular",
|
||||
"26",
|
||||
"27",
|
||||
"28",
|
||||
"29",
|
||||
"30",
|
||||
"31",
|
||||
"32",
|
||||
"branched",
|
||||
"heisenbug",
|
||||
"latest",
|
||||
"modular",
|
||||
"rawhide"
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
To list the tags in a local host docker/distribution registry on port 5000, in this case for the "fedora" repository:
|
||||
|
||||
```sh
|
||||
$ skopeo list-tags docker://localhost:5000/fedora
|
||||
{
|
||||
"Repository": "localhost:5000/fedora",
|
||||
"Tags": [
|
||||
"latest",
|
||||
"30",
|
||||
"31"
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
### Docker-archive Transport
|
||||
|
||||
To list the tags in a local docker-archive file:
|
||||
|
||||
```sh
|
||||
$ skopeo list-tags docker-archive:/tmp/busybox.tar.gz
|
||||
{
|
||||
"Tags": [
|
||||
"busybox:1.28.3"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Also supports more than one tag in an archive:
|
||||
|
||||
```sh
|
||||
$ skopeo list-tags docker-archive:/tmp/docker-two-images.tar.gz
|
||||
{
|
||||
"Tags": [
|
||||
"example.com/empty:latest",
|
||||
"example.com/empty/but:different"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
The listing will include a source-index entry (such as `@2` below) for each untagged image:
|
||||
|
||||
```sh
|
||||
$ skopeo list-tags docker-archive:/tmp/four-tags-with-an-untag.tar
|
||||
{
|
||||
"Tags": [
|
||||
"image1:tag1",
|
||||
"image2:tag2",
|
||||
"@2",
|
||||
"image4:tag4"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
# SEE ALSO
|
||||
skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5), containers-transports(5)
|
||||
|
||||
## AUTHORS
|
||||
|
||||
Zach Hill <zach@anchore.com>
|
||||
103
docs/skopeo-login.1.md
Normal file
@@ -0,0 +1,103 @@
|
||||
% skopeo-login(1)
|
||||
|
||||
## NAME
|
||||
skopeo\-login - Login to a container registry.
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo login** [*options*] _registry_
|
||||
|
||||
## DESCRIPTION
|
||||
**skopeo login** logs into a specified registry server with the correct username
|
||||
and password. **skopeo login** reads in the username and password from STDIN.
|
||||
The username and password can also be set using the **username** and **password** flags.
|
||||
The path of the authentication file can be specified by the user by setting the **authfile**
|
||||
flag. The default path used is **${XDG\_RUNTIME\_DIR}/containers/auth.json**.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--password**, **-p**=*password*
|
||||
|
||||
Password for registry
|
||||
|
||||
**--password-stdin**
|
||||
|
||||
Take the password from stdin
|
||||
|
||||
**--username**, **-u**=*username*
|
||||
|
||||
Username for registry
|
||||
|
||||
**--authfile**=*path*
|
||||
|
||||
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json
|
||||
|
||||
Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
|
||||
environment variable. `export REGISTRY_AUTH_FILE=path`
|
||||
|
||||
**--get-login**
|
||||
|
||||
Return the logged-in user for the registry. Return an error if no login is found.
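For instance (the registry and the reported user shown here are hypothetical):

```sh
$ skopeo login --get-login quay.io
testuser
```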
|
||||
|
||||
**--cert-dir**=*path*
|
||||
|
||||
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
|
||||
Default certificates directory is _/etc/containers/certs.d_.
|
||||
|
||||
**--help**, **-h**
|
||||
|
||||
Print usage statement
|
||||
|
||||
**--tls-verify**=_bool_
|
||||
|
||||
Require HTTPS and verify certificates when talking to the container registry or daemon. Defaults to the registry.conf setting.
|
||||
|
||||
**--verbose**, **-v**
|
||||
|
||||
Write more detailed information to stdout
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
```
|
||||
$ skopeo login docker.io
|
||||
Username: testuser
|
||||
Password:
|
||||
Login Succeeded!
|
||||
```
|
||||
|
||||
```
|
||||
$ skopeo login -u testuser -p testpassword localhost:5000
|
||||
Login Succeeded!
|
||||
```
|
||||
|
||||
```
|
||||
$ skopeo login --authfile authdir/myauths.json docker.io
|
||||
Username: testuser
|
||||
Password:
|
||||
Login Succeeded!
|
||||
```
|
||||
|
||||
```
|
||||
$ skopeo login --tls-verify=false -u test -p test localhost:5000
|
||||
Login Succeeded!
|
||||
```
|
||||
|
||||
```
|
||||
$ skopeo login --cert-dir /etc/containers/certs.d/ -u foo -p bar localhost:5000
|
||||
Login Succeeded!
|
||||
```
|
||||
|
||||
```
|
||||
$ skopeo login -u testuser --password-stdin < testpassword.txt docker.io
|
||||
Login Succeeded!
|
||||
```
|
||||
|
||||
```
|
||||
$ echo $testpassword | skopeo login -u testuser --password-stdin docker.io
|
||||
Login Succeeded!
|
||||
```
|
||||
|
||||
## SEE ALSO
|
||||
skopeo(1), skopeo-logout(1), containers-auth.json(5), containers-registries.conf(5), containers-certs.d(5)
|
||||
|
||||
## HISTORY
|
||||
May 2020, Originally compiled by Qi Wang <qiwan@redhat.com>
|
||||
57
docs/skopeo-logout.1.md
Normal file
@@ -0,0 +1,57 @@
|
||||
% skopeo-logout(1)
|
||||
|
||||
## NAME
|
||||
skopeo\-logout - Logout of a container registry.
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo logout** [*options*] _registry_
|
||||
|
||||
## DESCRIPTION
|
||||
**skopeo logout** logs out of a specified registry server by deleting the cached credentials
|
||||
stored in the **auth.json** file. The path of the authentication file can be overridden by the user by setting the **authfile** flag.
|
||||
The default path used is **${XDG\_RUNTIME\_DIR}/containers/auth.json**.
|
||||
All the cached credentials can be removed by setting the **all** flag.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--authfile**=*path*
|
||||
|
||||
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json
|
||||
|
||||
Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
|
||||
environment variable. `export REGISTRY_AUTH_FILE=path`
|
||||
|
||||
**--all**, **-a**
|
||||
|
||||
Remove the cached credentials for all registries in the auth file
|
||||
|
||||
**--help**, **-h**
|
||||
|
||||
Print usage statement
|
||||
|
||||
**--tls-verify**=_bool_
|
||||
|
||||
Require HTTPS and verify certificates when talking to the container registry or daemon. Defaults to the registry.conf setting.
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
```
|
||||
$ skopeo logout docker.io
|
||||
Remove login credentials for docker.io
|
||||
```
|
||||
|
||||
```
|
||||
$ skopeo logout --authfile authdir/myauths.json docker.io
|
||||
Remove login credentials for docker.io
|
||||
```
|
||||
|
||||
```
|
||||
$ skopeo logout --all
|
||||
Remove login credentials for all registries
|
||||
```
|
||||
|
||||
## SEE ALSO
|
||||
skopeo(1), skopeo-login(1), containers-auth.json(5)
|
||||
|
||||
## HISTORY
|
||||
May 2020, Originally compiled by Qi Wang <qiwan@redhat.com>
|
||||
@@ -1,7 +1,7 @@
|
||||
% skopeo-manifest-digest(1)
|
||||
|
||||
## NAME
|
||||
skopeo\-manifest\-digest -Compute a manifest digest of manifest-file and write it to standard output.
|
||||
skopeo\-manifest\-digest - Compute a manifest digest for a manifest-file and write it to standard output.
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo manifest-digest** _manifest-file_
|
||||
@@ -10,6 +10,12 @@ skopeo\-manifest\-digest -Compute a manifest digest of manifest-file and write i
|
||||
|
||||
Compute a manifest digest of _manifest-file_ and write it to standard output.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--help**, **-h**
|
||||
|
||||
Print usage statement
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
```sh
@@ -23,4 +29,3 @@ skopeo(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>


@@ -1,14 +1,13 @@
% skopeo-standalone-sign(1)

## NAME
skopeo\-standalone-sign - Simple Sign an image
skopeo\-standalone-sign - Debugging tool - Publish and sign an image in one step.

## SYNOPSIS
**skopeo standalone-sign** _manifest docker-reference key-fingerprint_ **--output**|**-o** _signature_
**skopeo standalone-sign** [*options*] _manifest_ _docker-reference_ _key-fingerprint_ **--output**|**-o** _signature_

## DESCRIPTION
This is primarily a debugging tool, or useful for special cases,
and usually should not be a part of your normal operational workflow; use `skopeo copy --sign-by` instead to publish and sign an image in one step.
This is primarily a debugging tool, useful for special cases, and usually should not be a part of your normal operational workflow; use `skopeo copy --sign-by` instead to publish and sign an image in one step.

_manifest_ Path to a file containing the image manifest

@@ -16,7 +15,19 @@ and usually should not be a part of your normal operational workflow; use `skope

_key-fingerprint_ Key identity to use for signing

**--output**|**-o** output file
## OPTIONS

**--help**, **-h**

Print usage statement

**--output**, **-o** _output file_

Write signature to _output file_.

**--passphrase-file**=_path_

The passphrase to use when signing with the key ID from `--sign-by`. Only the first line will be read. A passphrase stored in a file is of questionable security if other users can read this file. Do not use this option if at all avoidable.

## EXAMPLES

@@ -25,10 +36,13 @@ $ skopeo standalone-sign busybox-manifest.json registry.example.com/example/busy
$
```
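
As an additional illustration, the new options can be combined in one invocation; the passphrase file and key fingerprint below are placeholders, not values taken from this page:
```
$ skopeo standalone-sign --passphrase-file ./passphrase.txt busybox-manifest.json \
    registry.example.com/example/busybox 0123456789ABCDEF -o busybox.signature
```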

## NOTES

This command is intended for use with local signatures, e.g. OpenPGP (other signature formats may be added in the future), as per containers-signature(5). Furthermore, this command does **not** interact with the artifacts generated by Docker Content Trust (DCT). For more information, please see [containers-signature(5)](https://github.com/containers/image/blob/main/docs/containers-signature.5.md).

## SEE ALSO
skopeo(1), skopeo-copy(1)
skopeo(1), skopeo-copy(1), containers-signature(5)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>


@@ -1,14 +1,16 @@
% skopeo-standalone-verify(1)

## NAME
skopeo\-standalone\-verify - Verify an image signature
skopeo\-standalone\-verify - Verify an image signature.

## SYNOPSIS
**skopeo standalone-verify** _manifest docker-reference key-fingerprint signature_
**skopeo standalone-verify** _manifest_ _docker-reference_ _key-fingerprint_ _signature_

## DESCRIPTION

Verify a signature using local files, digest will be printed on success.
Verify a signature using local files; the digest will be printed on success. This is primarily a debugging tool, useful for special cases,
and usually should not be a part of your normal operational workflow. Additionally, consider configuring a signature verification policy file,
as per containers-policy.json(5).

_manifest_ Path to a file containing the image manifest

@@ -20,6 +22,12 @@ Verify a signature using local files, digest will be printed on success.

**Note:** If you do use this, make sure that the image cannot be changed at the source location between the times of its verification and use.

## OPTIONS

**--help**, **-h**

Print usage statement

## EXAMPLES

```sh
@@ -27,10 +35,13 @@ $ skopeo standalone-verify busybox-manifest.json registry.example.com/example/bu
Signature verified, digest sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55
```
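
Since the image could change at the source between verification and use, one illustrative follow-up is to pin the exact manifest by the digest printed above, e.g.:
```
$ skopeo inspect docker://registry.example.com/example/busybox@sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55
```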

## NOTES

This command is intended for use with local signatures, e.g. OpenPGP (other signature formats may be added in the future), as per containers-signature(5). Furthermore, this command does **not** interact with the artifacts generated by Docker Content Trust (DCT). For more information, please see [containers-signature(5)](https://github.com/containers/image/blob/main/docs/containers-signature.5.md).

## SEE ALSO
skopeo(1)
skopeo(1), containers-signature(5), containers-policy.json(5)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>


233
docs/skopeo-sync.1.md
Normal file
@@ -0,0 +1,233 @@
% skopeo-sync(1)

## NAME
skopeo\-sync - Synchronize images between container registries and local directories.


## SYNOPSIS
**skopeo sync** [*options*] --src _transport_ --dest _transport_ _source_ _destination_

## DESCRIPTION
Synchronize images between container registries and local directories.
The synchronization is achieved by copying all the images found at _source_ to _destination_.

Useful to synchronize a local container registry mirror, and to populate registries running inside air-gapped environments.

Unlike other skopeo commands, skopeo sync requires both source and destination transports to be specified separately from _source_ and _destination_.
One of the problems of prefixing a destination with its transport is that the registry `docker://hostname:port` would be wrongly interpreted as an image reference at a non-fully-qualified registry, with `hostname` and `port` taken as the image name and tag.

Available _source_ transports:
- _docker_ (i.e. `--src docker`): _source_ is a repository hosted on a container registry (e.g.: `registry.example.com/busybox`).
If no image tag is specified, skopeo sync copies all the tags found in that repository.
- _dir_ (i.e. `--src dir`): _source_ is a local directory path (e.g.: `/media/usb/`). Refer to skopeo(1) **dir:**_path_ for the local image format.
- _yaml_ (i.e. `--src yaml`): _source_ is a local YAML file path.
The YAML file should specify the list of images copied from different container registries (local directories are not supported). Refer to EXAMPLES for the file format.

Available _destination_ transports:
- _docker_ (i.e. `--dest docker`): _destination_ is a container registry (e.g.: `my-registry.local.lan`).
- _dir_ (i.e. `--dest dir`): _destination_ is a local directory path (e.g.: `/media/usb/`).
One directory per source 'image:tag' is created for each copied image.

When the `--scoped` option is specified, images are prefixed with the source image path so that multiple images with the same
name can be stored at _destination_.

## OPTIONS
**--all**, **-a**
If one of the images in __src__ refers to a list of images, instead of copying just the image which matches the current OS and
architecture (subject to the use of the global --override-os, --override-arch and --override-variant options), attempt to copy all of
the images in the list, and the list itself.

**--authfile** _path_

Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--src-authfile** _path_

Path of the authentication file for the source registry. Uses path given by `--authfile`, if not provided.

**--dest-authfile** _path_

Path of the authentication file for the destination registry. Uses path given by `--authfile`, if not provided.

**--dry-run**

Run the sync without actually copying data to the destination.

**--src**, **-s** _transport_ Transport for the source repository.

**--dest**, **-d** _transport_ Destination transport.

**--format**, **-f** _manifest-type_ Manifest Type (oci, v2s1, or v2s2) to use when syncing image(s) to a destination (default is manifest type of source, with fallbacks).

**--help**, **-h**

Print usage statement.

**--scoped** Prefix images with the source image path, so that multiple images with the same name can be stored at _destination_.

**--preserve-digests** Preserve the digests during copying. Fail if the digest cannot be preserved.

**--remove-signatures** Do not copy signatures, if any, from _source-image_. This is necessary when copying a signed image to a destination which does not support signatures.

**--sign-by** _key-id_

Add a “simple signing” signature using that key ID for an image name corresponding to _destination-image_

**--sign-by-sigstore-private-key** _path_

Add a sigstore signature using a private key at _path_ for an image name corresponding to _destination-image_

**--sign-passphrase-file** _path_

The passphrase to use when signing with `--sign-by` or `--sign-by-sigstore-private-key`. Only the first line will be read. A passphrase stored in a file is of questionable security if other users can read this file. Do not use this option if at all avoidable.

**--src-creds** _username[:password]_ for accessing the source registry.

**--dest-creds** _username[:password]_ for accessing the destination registry.

**--src-cert-dir** _path_ Use certificates (*.crt, *.cert, *.key) at _path_ to connect to the source registry or daemon.

**--src-no-creds** Access the registry anonymously.

**--src-tls-verify**=_bool_ Require HTTPS and verify certificates when talking to a container source registry or daemon. Defaults to the source registry entry in the registry.conf setting.

**--dest-cert-dir** _path_ Use certificates (*.crt, *.cert, *.key) at _path_ to connect to the destination registry or daemon.

**--dest-no-creds** Access the registry anonymously.

**--dest-tls-verify**=_bool_ Require HTTPS and verify certificates when talking to a container destination registry or daemon. Defaults to the destination registry entry in the registry.conf setting.

**--src-registry-token** _Bearer token_ for accessing the source registry.

**--dest-registry-token** _Bearer token_ for accessing the destination registry.

**--retry-times** The number of times to retry; the retry wait time is increased exponentially with the number of failed attempts.

**--keep-going**
If any errors occur while copying images, those errors are logged and the process continues syncing the rest of the images, finally failing at the end.

**--src-username**

The username to access the source registry.

**--src-password**

The password to access the source registry.

**--dest-username**

The username to access the destination registry.

**--dest-password**

The password to access the destination registry.

## EXAMPLES

### Synchronizing to a local directory
```
$ skopeo sync --src docker --dest dir registry.example.com/busybox /media/usb
```
Images are located at:
```
/media/usb/busybox:1-glibc
/media/usb/busybox:1-musl
/media/usb/busybox:1-ubuntu
...
/media/usb/busybox:latest
```
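
If only one tag is needed, the tag can be given as part of the source reference (an illustrative variant of the command above, following the note that all tags are copied only when no tag is specified):
```
$ skopeo sync --src docker --dest dir registry.example.com/busybox:latest /media/usb
```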

### Synchronizing to a container registry from local
Images are located at:
```
/media/usb/busybox:1-glibc
```
Sync run:
```
$ skopeo sync --src dir --dest docker /media/usb/busybox:1-glibc my-registry.local.lan/test/
```
Destination registry content:
```
REPO                                 TAGS
my-registry.local.lan/test/busybox   1-glibc
```

### Synchronizing to a local directory, scoped
```
$ skopeo sync --src docker --dest dir --scoped registry.example.com/busybox /media/usb
```
Images are located at:
```
/media/usb/registry.example.com/busybox:1-glibc
/media/usb/registry.example.com/busybox:1-musl
/media/usb/registry.example.com/busybox:1-ubuntu
...
/media/usb/registry.example.com/busybox:latest
```

### Synchronizing to a container registry
```
skopeo sync --src docker --dest docker registry.example.com/busybox my-registry.local.lan
```
Destination registry content:
```
REPO                         TAGS
registry.local.lan/busybox   1-glibc, 1-musl, 1-ubuntu, ..., latest
```

### Synchronizing to a container registry keeping the repository
```
skopeo sync --src docker --dest docker registry.example.com/repo/busybox my-registry.local.lan/repo
```
Destination registry content:
```
REPO                              TAGS
registry.local.lan/repo/busybox   1-glibc, 1-musl, 1-ubuntu, ..., latest
```

### YAML file content (used as _source_ for **--src yaml**)

```yaml
registry.example.com:
    images:
        busybox: []
        redis:
            - "1.0"
            - "2.0"
            - "sha256:0000000000000000000000000000000011111111111111111111111111111111"
    images-by-tag-regex:
        nginx: ^1\.13\.[12]-alpine-perl$
    credentials:
        username: john
        password: this is a secret
    tls-verify: true
    cert-dir: /home/john/certs
quay.io:
    tls-verify: false
    images:
        coreos/etcd:
            - latest
```
If the YAML file is named `sync.yml`, the sync run is:
```
skopeo sync --src yaml --dest docker sync.yml my-registry.local.lan/repo/
```
This will copy the following images:
- Repository `registry.example.com/busybox`: all images, as no tags are specified.
- Repository `registry.example.com/redis`: images tagged "1.0" and "2.0", along with the image with digest "sha256:0000000000000000000000000000000011111111111111111111111111111111".
- Repository `registry.example.com/nginx`: images tagged "1.13.1-alpine-perl" and "1.13.2-alpine-perl".
- Repository `quay.io/coreos/etcd`: images tagged "latest".

For the registry `registry.example.com`, the "john"/"this is a secret" credentials are used, with server TLS certificates located at `/home/john/certs`.

TLS verification is normally enabled, and it can be disabled by setting `tls-verify` to `false`.
In the above example, TLS verification is enabled for `registry.example.com`, while it is
disabled for `quay.io`.
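
Before copying anything for real, a possible way to preview such a run is the `--dry-run` flag described above (illustrative):
```
$ skopeo sync --dry-run --src yaml --dest docker sync.yml my-registry.local.lan/repo/
```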

## SEE ALSO
skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5), containers-policy.json(5), containers-transports(5)

## AUTHORS

Flavio Castelli <fcastelli@suse.com>, Marco Vedovati <mvedovati@suse.com>
@@ -27,13 +27,13 @@ its functionality. It also does not require root, unless you are copying images
Most commands refer to container images, using a _transport_`:`_details_ format. The following formats are supported:

**containers-storage:**_docker-reference_
An image located in a local containers/storage image store. Location and image store specified in /etc/containers/storage.conf
An image located in a local containers/storage image store. Both the location and image store are specified in /etc/containers/storage.conf. (Backend for Podman, CRI-O, Buildah and friends)

**dir:**_path_
An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.

**docker://**_docker-reference_
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in either `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(podman login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in either `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(skopeo login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.

**docker-archive:**_path_[**:**_docker-reference_]
An image is stored in the `docker save` formatted file. _docker-reference_ is only used when creating such a file, and it must not contain a digest.
@@ -44,51 +44,83 @@ Most commands refer to container images, using a _transport_`:`_details_ format.
**oci:**_path_**:**_tag_
An image _tag_ in a directory compliant with "Open Container Image Layout Specification" at _path_.

**ostree:**_image_[**@**_/absolute/repo/path_]
An image in local OSTree repository. _/absolute/repo/path_ defaults to _/ostree/repo_.
**oci-archive:**_path_**:**_tag_
An image _tag_ in a tar archive compliant with "Open Container Image Layout Specification" at _path_.

See [containers-transports(5)](https://github.com/containers/image/blob/master/docs/containers-transports.5.md) for details.

## OPTIONS

**--debug** enable debug output
**--command-timeout** _duration_

**--policy** _path-to-policy_ Path to a policy.json file to use for verifying signatures and deciding whether an image is trusted, overriding the default trust policy file.
Timeout for the command execution.

**--insecure-policy** Adopt an insecure, permissive policy that allows anything. This obviates the need for a policy file.
**--debug**

**--registries.d** _dir_ use registry configuration files in _dir_ (e.g. for container signature storage), overriding the default path.
enable debug output

**--override-arch** _arch_ Use _arch_ instead of the architecture of the machine for choosing images.
**--help**, **-h**

**--override-os** _OS_ Use _OS_ instead of the running OS for choosing images.
Show help

**--command-timeout** _duration_ Timeout for the command execution.
**--insecure-policy**

**--help**|**-h** Show help
Adopt an insecure, permissive policy that allows anything. This obviates the need for a policy file.

**--version**|**-v** print the version number
**--override-arch** _arch_

Use _arch_ instead of the architecture of the machine for choosing images.

**--override-os** _os_

Use _OS_ instead of the running OS for choosing images.

**--override-variant** _variant_

Use _variant_ instead of the running architecture variant for choosing images.

**--policy** _path-to-policy_

Path to a policy.json file to use for verifying signatures and deciding whether an image is trusted, overriding the default trust policy file.

**--registries.d** _dir_

Use registry configuration files in _dir_ (e.g. for container signature storage), overriding the default path.

**--tmpdir** _dir_

Directory used to store temporary files. Defaults to /var/tmp.

**--version**, **-v**

Print the version number
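
As an illustration of the reorganized global options, they are placed before the subcommand; the image reference below is only an example:
```
$ skopeo --override-os linux --override-arch arm64 inspect docker://registry.example.com/busybox:latest
```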

## COMMANDS

| Command | Description |
| ----------------------------------------- | ------------------------------------------------------------------------------ |
| [skopeo-copy(1)](skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
| [skopeo-delete(1)](skopeo-delete.1.md) | Mark image-name for deletion. |
| [skopeo-inspect(1)](skopeo-inspect.1.md) | Return low-level information about image-name in a registry. |
| [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest of manifest-file and write it to standard output.|
| [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Sign an image. |
| [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md)| Verify an image. |
| [skopeo-delete(1)](skopeo-delete.1.md) | Mark the _image-name_ for later deletion by the registry's garbage collector. |
| [skopeo-inspect(1)](skopeo-inspect.1.md) | Return low-level information about _image-name_ in a registry. |
| [skopeo-list-tags(1)](skopeo-list-tags.1.md) | List image names in a transport-specific collection of images.|
| [skopeo-login(1)](skopeo-login.1.md) | Login to a container registry. |
| [skopeo-logout(1)](skopeo-logout.1.md) | Logout of a container registry. |
| [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
| [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
| [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md)| Verify an image signature. |
| [skopeo-sync(1)](skopeo-sync.1.md)| Synchronize images between container registries and local directories. |

## FILES
**/etc/containers/policy.json**
Default trust policy file, if **--policy** is not specified.
The policy format is documented in https://github.com/containers/image/blob/master/docs/containers-policy.json.5.md .
The policy format is documented in [containers-policy.json(5)](https://github.com/containers/image/blob/master/docs/containers-policy.json.5.md).

**/etc/containers/registries.d**
Default directory containing registry configuration, if **--registries.d** is not specified.
The contents of this directory are documented in https://github.com/containers/image/blob/master/docs/containers-policy.json.5.md .
The contents of this directory are documented in [containers-policy.json(5)](https://github.com/containers/image/blob/master/docs/containers-policy.json.5.md).

## SEE ALSO
podman-login(1), docker-login(1)
skopeo-login(1), docker-login(1), containers-auth.json(5), containers-storage.conf(5), containers-policy.json(5), containers-transports(5)

## AUTHORS

598
docs/skopeo.svg
@@ -1,546 +1,74 @@
(SVG markup not reproduced here: the original Inkscape-generated skopeo logo SVG is replaced by a redrawn "skopeo" badge graphic, sodipodi:docname "skopeo-badge-full-vert.svg", carrying the logo artwork and the "skopeo" wordmark.)
|
||||
id="path84379"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84408);fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
d="m 202.78906,251.42318 2.08399,1.20118 9.6289,-16.67969 -2.08203,-1.20117 z"
|
||||
id="rect84396"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84441);fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
d="m 169.0918,226.26889 2.35937,1.36133 4.69336,-8.13086 -2.35937,-1.36133 z"
|
||||
id="rect84429"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84455);fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
d="m 234.17188,269.53842 2.08203,1.20312 9.63086,-16.67773 -2.08399,-1.20313 z"
|
||||
id="rect84443"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:#ffffff;fill-rule:evenodd;stroke:#f8ead2;stroke-width:0.52916664;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
d="m 215.55025,240.82707 22.49734,12.98884"
|
||||
id="path84521"
|
||||
inkscape:connector-curvature="0" />
|
||||
</g>
|
||||
</svg>
|
||||
|
||||
|
Before Width: | Height: | Size: 24 KiB After Width: | Height: | Size: 14 KiB |
go.mod (144 changed lines)
@@ -1,46 +1,108 @@
|
||||
module github.com/containers/skopeo
|
||||
|
||||
go 1.12
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
github.com/VividCortex/ewma v1.1.1 // indirect
|
||||
github.com/containerd/continuity v0.0.0-20180216233310-d8fb8589b0e8 // indirect
|
||||
github.com/containers/buildah v1.8.4
|
||||
github.com/containers/image v3.0.1+incompatible
|
||||
github.com/containers/storage v1.13.0
|
||||
github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65 // indirect
|
||||
github.com/docker/docker v0.0.0-20180522102801-da99009bbb11
|
||||
github.com/docker/docker-credential-helpers v0.6.0 // indirect
|
||||
github.com/docker/go-connections v0.0.0-20180212134524-7beb39f0b969 // indirect
|
||||
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
|
||||
github.com/etcd-io/bbolt v1.3.2 // indirect
|
||||
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 // indirect
|
||||
github.com/go-check/check v0.0.0-20180628173108-788fd7840127
|
||||
github.com/gogo/protobuf v0.0.0-20170815085658-fcdc5011193f // indirect
|
||||
github.com/gorilla/context v0.0.0-20140604161150-14f550f51af5 // indirect
|
||||
github.com/gorilla/mux v0.0.0-20140926153814-e444e69cbd2e // indirect
|
||||
github.com/imdario/mergo v0.0.0-20141206190957-6633656539c1 // indirect
|
||||
github.com/kr/pretty v0.1.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.4 // indirect
|
||||
github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1
|
||||
github.com/opencontainers/image-spec v0.0.0-20180918080442-7b1e489870ac
|
||||
github.com/opencontainers/image-tools v0.0.0-20170926011501-6d941547fa1d
|
||||
github.com/opencontainers/runtime-spec v1.0.0 // indirect
|
||||
github.com/pborman/uuid v0.0.0-20160209185913-a97ce2ca70fa // indirect
|
||||
github.com/pkg/errors v0.8.1
|
||||
github.com/sirupsen/logrus v1.4.2
|
||||
github.com/stretchr/testify v1.3.0
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
|
||||
github.com/ulikunitz/xz v0.5.4 // indirect
|
||||
github.com/urfave/cli v1.20.0
|
||||
github.com/vbauerster/mpb v3.4.0+incompatible // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.1.0 // indirect
|
||||
go4.org v0.0.0-20190218023631-ce4c26f7be8e // indirect
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f // indirect
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e // indirect
|
||||
gopkg.in/yaml.v2 v2.0.0-20141029210843-d466437aa4ad // indirect
|
||||
k8s.io/client-go v0.0.0-20181219152756-3dd551c0f083 // indirect
|
||||
github.com/containers/common v0.49.1
|
||||
github.com/containers/image/v5 v5.22.1
|
||||
github.com/containers/ocicrypt v1.1.5
|
||||
github.com/containers/storage v1.42.0
|
||||
github.com/docker/docker v20.10.17+incompatible
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198
|
||||
github.com/opencontainers/image-tools v1.0.0-rc3
|
||||
github.com/sirupsen/logrus v1.9.0
|
||||
github.com/spf13/cobra v1.5.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v1.2.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.5.2 // indirect
|
||||
github.com/Microsoft/hcsshim v0.9.3 // indirect
|
||||
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/containerd/cgroups v1.0.3 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.12.0 // indirect
|
||||
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/distribution v2.8.1+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.6.4 // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
|
||||
github.com/ghodss/yaml v1.0.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-containerregistry v0.10.0 // indirect
|
||||
github.com/google/go-intervals v0.0.2 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/gorilla/mux v1.8.0 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/imdario/mergo v0.3.13 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.15.9 // indirect
|
||||
github.com/klauspost/pgzip v1.2.5 // indirect
|
||||
github.com/kr/pretty v0.2.1 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/mattn/go-shellwords v1.0.12 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||
github.com/miekg/pkcs11 v1.1.1 // indirect
|
||||
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
|
||||
github.com/moby/sys/mountinfo v0.6.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/opencontainers/runc v1.1.3 // indirect
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
|
||||
github.com/opencontainers/selinux v1.10.1 // indirect
|
||||
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/proglottis/gpgme v0.1.3 // indirect
|
||||
github.com/prometheus/client_golang v1.12.1 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.32.1 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/rivo/uniseg v0.2.0 // indirect
|
||||
github.com/russross/blackfriday v2.0.0+incompatible // indirect
|
||||
github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1 // indirect
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
|
||||
github.com/sylabs/sif/v2 v2.7.1 // indirect
|
||||
github.com/tchap/go-patricia v2.3.0+incompatible // indirect
|
||||
github.com/theupdateframework/go-tuf v0.3.1 // indirect
|
||||
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
|
||||
github.com/ulikunitz/xz v0.5.10 // indirect
|
||||
github.com/vbatts/tar-split v0.11.2 // indirect
|
||||
github.com/vbauerster/mpb/v7 v7.4.2 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
go.etcd.io/bbolt v1.3.6 // indirect
|
||||
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.3.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838 // indirect
|
||||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f // indirect
|
||||
google.golang.org/grpc v1.47.0 // indirect
|
||||
google.golang.org/protobuf v1.28.0 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
cc -E - > /dev/null 2> /dev/null << EOF
|
||||
#include <btrfs/ioctl.h>
|
||||
EOF
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
cc -E - > /dev/null 2> /dev/null << EOF
|
||||
#include <btrfs/version.h>
|
||||
EOF
|
||||
|
||||
hack/get_ci_vm.sh (new executable file, 61 lines)
@@ -0,0 +1,61 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
#
|
||||
# For help and usage information, simply execute the script w/o any arguments.
|
||||
#
|
||||
# This script is intended to be run by Red Hat skopeo developers who need
|
||||
# to debug problems specifically related to Cirrus-CI automated testing.
|
||||
# It requires that you have been granted prior access to create VMs in
|
||||
# google-cloud. For non-Red Hat contributors, VMs are available as-needed,
|
||||
# with supervision upon request.
|
||||
|
||||
set -e
|
||||
|
||||
SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
|
||||
SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
|
||||
REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")
|
||||
|
||||
# Help detect if we were called by get_ci_vm container
|
||||
GET_CI_VM="${GET_CI_VM:-0}"
|
||||
in_get_ci_vm() {
|
||||
if ((GET_CI_VM==0)); then
|
||||
echo "Error: $1 is not intended for use in this context"
|
||||
exit 2
|
||||
fi
|
||||
}
|
||||
|
||||
# get_ci_vm APIv1 container entrypoint calls into this script
|
||||
# to obtain required repo. specific configuration options.
|
||||
if [[ "$1" == "--config" ]]; then
|
||||
in_get_ci_vm "$1"
|
||||
cat <<EOF
|
||||
DESTDIR="/var/tmp/go/src/github.com/containers/skopeo"
|
||||
UPSTREAM_REPO="https://github.com/containers/skopeo.git"
|
||||
GCLOUD_PROJECT="skopeo"
|
||||
GCLOUD_IMGPROJECT="libpod-218412"
|
||||
GCLOUD_CFG="skopeo"
|
||||
GCLOUD_ZONE="${GCLOUD_ZONE:-us-central1-f}"
|
||||
GCLOUD_CPUS="2"
|
||||
GCLOUD_MEMORY="4Gb"
|
||||
GCLOUD_DISK="200"
|
||||
EOF
|
||||
elif [[ "$1" == "--setup" ]]; then
|
||||
in_get_ci_vm "$1"
|
||||
# get_ci_vm container entrypoint calls us with this option on the
|
||||
# Cirrus-CI environment instance, to perform repo.-specific setup.
|
||||
echo "+ Executing setup" > /dev/stderr
|
||||
${GOSRC}/${SCRIPT_BASE}/runner.sh setup
|
||||
else
|
||||
# Create and access VM for specified Cirrus-CI task
|
||||
mkdir -p $HOME/.config/gcloud/ssh
|
||||
podman run -it --rm \
|
||||
--tz=local \
|
||||
-e NAME="$USER" \
|
||||
-e SRCDIR=/src \
|
||||
-e GCLOUD_ZONE="$GCLOUD_ZONE" \
|
||||
-e DEBUG="${DEBUG:-0}" \
|
||||
-v $REPO_DIRPATH:/src:O \
|
||||
-v $HOME/.config/gcloud:/root/.config/gcloud:z \
|
||||
-v $HOME/.config/gcloud/ssh:/root/.ssh:z \
|
||||
quay.io/libpod/get_ci_vm:latest "$@"
|
||||
fi
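For orientation, a usage sketch follows; the task name is only an example (it matches the `--envs` value used by `hack/get_fqin.sh`), and the exact arguments accepted are defined by the get_ci_vm container, not this script:

```bash
# Show help and usage information (run with no arguments)
hack/get_ci_vm.sh

# Create and connect to a VM for a specific Cirrus-CI task (task name assumed)
hack/get_ci_vm.sh "Skopeo Test"
```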
|
||||
hack/get_fqin.sh (new executable file, 34 lines)
@@ -0,0 +1,34 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# This script is intended to be called from the Makefile. Its purpose
|
||||
# is to automate correspondence between the environment used for local
|
||||
# development and CI.
|
||||
|
||||
set -e
|
||||
|
||||
SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
|
||||
SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
|
||||
REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")
|
||||
|
||||
# When running under CI, we already have the necessary information,
|
||||
# simply provide it to the Makefile.
|
||||
if [[ -n "$SKOPEO_CIDEV_CONTAINER_FQIN" ]]; then
|
||||
echo "$SKOPEO_CIDEV_CONTAINER_FQIN"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ -n $(command -v podman) ]]; then CONTAINER_RUNTIME=podman; fi
|
||||
CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-docker}
|
||||
|
||||
# Borrow the get_ci_vm container image since it's small, and
|
||||
# by necessity contains a script that can accurately interpret
|
||||
# env. var. values from any .cirrus.yml runtime context.
|
||||
$CONTAINER_RUNTIME run --rm \
|
||||
--security-opt label=disable \
|
||||
-v $REPO_DIRPATH:/src:ro \
|
||||
--entrypoint=/usr/share/automation/bin/cirrus-ci_env.py \
|
||||
quay.io/libpod/get_ci_vm:latest \
|
||||
--envs="Skopeo Test" /src/.cirrus.yml | \
|
||||
egrep -m1 '^SKOPEO_CIDEV_CONTAINER_FQIN' | \
|
||||
awk -F "=" -e '{print $2}' | \
|
||||
tr -d \'\"
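A rough sketch of using this outside the Makefile (the variable name is just for illustration):

```bash
# Resolve the CI development container image and pull it locally
fqin=$(hack/get_fqin.sh)
podman pull "$fqin"
```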
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
tmpdir="$PWD/tmp.$RANDOM"
|
||||
mkdir -p "$tmpdir"
|
||||
trap 'rm -fr "$tmpdir"' EXIT
|
||||
|
||||
hack/libsubid_tag.sh (new executable file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
#!/usr/bin/env bash
|
||||
if test $(${GO:-go} env GOOS) != "linux" ; then
|
||||
exit 0
|
||||
fi
|
||||
tmpdir="$PWD/tmp.$RANDOM"
|
||||
mkdir -p "$tmpdir"
|
||||
trap 'rm -fr "$tmpdir"' EXIT
|
||||
cc -o "$tmpdir"/libsubid_tag -l subid -x c - > /dev/null 2> /dev/null << EOF
|
||||
#include <shadow/subid.h>
|
||||
int main() {
|
||||
struct subid_range *ranges = NULL;
|
||||
get_subuid_ranges("root", &ranges);
|
||||
free(ranges);
|
||||
return 0;
|
||||
}
|
||||
EOF
|
||||
if test $? -eq 0 ; then
|
||||
echo libsubid
|
||||
fi
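These tag-probe scripts print a build tag only when the host toolchain supports the corresponding feature; a sketch of feeding the output into a local build (the Make target and variable are the ones used elsewhere in this repo):

```bash
# Build locally with whatever optional tags the host supports
make binary-local BUILDTAGS="$(hack/libsubid_tag.sh)"
```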
|
||||
hack/make.sh (56 changed lines)
@@ -2,15 +2,14 @@
|
||||
set -e
|
||||
|
||||
# This script builds various binaries from a checkout of the skopeo
|
||||
# source code.
|
||||
# source code. DO NOT CALL THIS SCRIPT DIRECTLY.
|
||||
#
|
||||
# Requirements:
|
||||
# - The current directory should be a checkout of the skopeo source code
|
||||
# (https://github.com/containers/skopeo). Whatever version is checked out
|
||||
# will be built.
|
||||
# - The script is intended to be run inside the docker container specified
|
||||
# in the Dockerfile at the root of the source. In other words:
|
||||
# DO NOT CALL THIS SCRIPT DIRECTLY.
|
||||
# - The script is intended to be run inside the container specified
|
||||
# in the output of hack/get_fqin.sh
|
||||
# - The right way to call this script is to invoke "make" from
|
||||
# your checkout of the skopeo repository.
|
||||
# the Makefile will do a "docker build -t skopeo ." and then
|
||||
@@ -23,22 +22,19 @@ export SKOPEO_PKG='github.com/containers/skopeo'
|
||||
export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
export MAKEDIR="$SCRIPTDIR/make"
|
||||
|
||||
# We're a nice, sexy, little shell script, and people might try to run us;
|
||||
# but really, they shouldn't. We want to be in a container!
|
||||
inContainer="AssumeSoInitially"
|
||||
if [ "$PWD" != "/go/src/$SKOPEO_PKG" ]; then
|
||||
unset inContainer
|
||||
fi
|
||||
# Set this to 1 to enable installation/modification of environment/services
|
||||
export SKOPEO_CONTAINER_TESTS=${SKOPEO_CONTAINER_TESTS:-0}
|
||||
|
||||
if [ -z "$inContainer" ]; then
|
||||
{
|
||||
echo "# WARNING! I don't seem to be running in a Docker container."
|
||||
echo "# The result of this command might be an incorrect build, and will not be"
|
||||
echo "# officially supported."
|
||||
echo "#"
|
||||
echo "# Try this instead: make all"
|
||||
echo "#"
|
||||
} >&2
|
||||
if [[ "$SKOPEO_CONTAINER_TESTS" == "0" ]] && [[ "$CI" != "true" ]]; then
|
||||
(
|
||||
echo "***************************************************************"
|
||||
echo "WARNING: Executing tests directly on the local development"
|
||||
echo " host is highly discouraged. Many important items"
|
||||
echo " will be skipped. For manual execution, please utilize"
|
||||
echo " the Makefile targets WITHOUT the '-local' suffix."
|
||||
echo "***************************************************************"
|
||||
) > /dev/stderr
|
||||
sleep 5s
|
||||
fi
|
||||
|
||||
echo
|
||||
@@ -57,25 +53,21 @@ DEFAULT_BUNDLES=(
|
||||
test-integration
|
||||
)
|
||||
|
||||
TESTFLAGS+=" -test.timeout=10m"
|
||||
# Go module support: set `-mod=vendor` to use the vendored sources
|
||||
# See also the top-level Makefile.
|
||||
mod_vendor=
|
||||
if go help mod >/dev/null 2>&1; then
|
||||
export GO111MODULE=on
|
||||
mod_vendor='-mod=vendor'
|
||||
fi
|
||||
|
||||
# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
|
||||
# You can use this to select certain tests to run, eg.
|
||||
#
|
||||
# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit
|
||||
#
|
||||
# For integration-cli test, we use [gocheck](https://labix.org/gocheck), if you want
|
||||
# to run certain tests on your local host, you should run with command:
|
||||
#
|
||||
# TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration-cli
|
||||
#
|
||||
go_test_dir() {
|
||||
dir=$1
|
||||
(
|
||||
echo '+ go test' $TESTFLAGS ${BUILDTAGS:+-tags "$BUILDTAGS"} "${SKOPEO_PKG}${dir#.}"
|
||||
echo '+ go test' $mod_vendor $TESTFLAGS ${BUILDTAGS:+-tags "$BUILDTAGS"} "${SKOPEO_PKG}${dir#.}"
|
||||
cd "$dir"
|
||||
export DEST="$ABS_DEST" # we're in a subshell, so this is safe -- our integration-cli tests need DEST, and "cd" screws it up
|
||||
go test $TESTFLAGS ${BUILDTAGS:+-tags "$BUILDTAGS"}
|
||||
go test $mod_vendor $TESTFLAGS ${BUILDTAGS:+-tags "$BUILDTAGS"}
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ if [ -z "$VALIDATE_UPSTREAM" ]; then
|
||||
# are running more than one validate bundle script
|
||||
|
||||
VALIDATE_REPO='https://github.com/containers/skopeo.git'
|
||||
VALIDATE_BRANCH='master'
|
||||
VALIDATE_BRANCH='main'
|
||||
|
||||
if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
|
||||
VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git"
|
||||
|
||||
@@ -2,13 +2,11 @@
|
||||
set -e
|
||||
|
||||
bundle_test_integration() {
|
||||
TESTFLAGS="$TESTFLAGS -check.v"
|
||||
go_test_dir ./integration
|
||||
}
|
||||
|
||||
# subshell so that we can export PATH without breaking other things
|
||||
(
|
||||
make binary-local ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
|
||||
make install
|
||||
make PREFIX=/usr install
|
||||
bundle_test_integration
|
||||
) 2>&1
|
||||
|
||||
@@ -1,18 +1,24 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
# Before running podman for the first time, make sure
|
||||
# to set storage to vfs (not overlay): podman-in-podman
|
||||
# doesn't work with overlay. And, disable mountopt,
|
||||
# which causes error with vfs.
|
||||
sed -i \
|
||||
-e 's/^driver\s*=.*/driver = "vfs"/' \
|
||||
-e 's/^mountopt/#mountopt/' \
|
||||
/etc/containers/storage.conf
|
||||
# These tests can run in/outside of a container. However,
|
||||
# not all storage drivers are supported in a container
|
||||
# environment. Detect this and set up storage when
|
||||
# running in a container.
|
||||
if ((SKOPEO_CONTAINER_TESTS)) && [[ -r /etc/containers/storage.conf ]]; then
|
||||
sed -i \
|
||||
-e 's/^driver\s*=.*/driver = "vfs"/' \
|
||||
-e 's/^mountopt/#mountopt/' \
|
||||
/etc/containers/storage.conf
|
||||
elif ((SKOPEO_CONTAINER_TESTS)); then
|
||||
cat >> /etc/containers/storage.conf << EOF
|
||||
[storage]
|
||||
driver = "vfs"
|
||||
EOF
|
||||
fi
|
||||
|
||||
# Build skopeo, install into /usr/bin
|
||||
make binary-local ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
|
||||
make install
|
||||
make PREFIX=/usr install
|
||||
|
||||
# Run tests
|
||||
SKOPEO_BINARY=/usr/bin/skopeo bats --tap systemtest
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
errors=$(go vet $(go list -e ./... | grep -v "$SKOPEO_PKG"/vendor))
|
||||
errors=$(go vet -tags="${BUILDTAGS}" $mod_vendor $(go list $mod_vendor -e ./...))
|
||||
|
||||
if [ -z "$errors" ]; then
|
||||
echo 'Congratulations! All Go source files have been vetted.'
|
||||
|
||||
hack/man-page-checker (new executable file, 150 lines)
@@ -0,0 +1,150 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# man-page-checker - validate and cross-reference man page names
|
||||
#
|
||||
# This is the script that cross-checks BETWEEN MAN PAGES. It is not the
|
||||
# script that cross-checks that each option in skopeo foo --help is listed
|
||||
# in skopeo-foo.1.md and vice-versa; that one is xref-helpmsgs-manpages.
|
||||
#
|
||||
|
||||
verbose=
|
||||
for i; do
|
||||
case "$i" in
|
||||
-v|--verbose) verbose=verbose ;;
|
||||
esac
|
||||
done
|
||||
|
||||
|
||||
die() {
|
||||
echo "$(basename $0): $*" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
cd $(dirname $0)/../docs || die "Please run me from top-level skopeo dir"
|
||||
|
||||
rc=0
|
||||
|
||||
# Pass 1: cross-check file names with NAME section
|
||||
#
|
||||
# for a given skopeo-foo.1.md, the NAME should be 'skopeo-foo'
|
||||
for md in *.1.md;do
|
||||
# Read the first line after '## NAME'
|
||||
name=$(egrep -A1 '^## NAME' $md|tail -1|awk '{print $1}' | tr -d \\\\)
|
||||
|
||||
expect=$(basename $md .1.md)
|
||||
if [ "$name" != "$expect" ]; then
|
||||
echo
|
||||
printf "Inconsistent program NAME in %s:\n" $md
|
||||
printf " NAME= %s (expected: %s)\n" $name $expect
|
||||
rc=1
|
||||
fi
|
||||
done
|
||||
|
||||
# Pass 2: compare descriptions.
|
||||
#
|
||||
# Make sure the descriptive text in skopeo-foo.1.md matches the one
|
||||
# in the table in skopeo.1.md.
|
||||
for md in $(ls -1 *-*.1.md);do
|
||||
desc=$(egrep -A1 '^## NAME' $md|tail -1|sed -E -e 's/^skopeo[^[:space:]]+ - //')
|
||||
|
||||
# Find the descriptive text in the main skopeo man page.
|
||||
parent=skopeo.1.md
|
||||
parent_desc=$(grep $md $parent | awk -F'|' '{print $3}' | sed -E -e 's/^[[:space:]]+//' -e 's/[[:space:]]+$//')
|
||||
|
||||
if [ "$desc" != "$parent_desc" ]; then
|
||||
echo
|
||||
printf "Inconsistent subcommand descriptions:\n"
|
||||
printf " %-32s = '%s'\n" $md "$desc"
|
||||
printf " %-32s = '%s'\n" $parent "$parent_desc"
|
||||
printf "Please ensure that the NAME section of $md\n"
|
||||
printf "matches the subcommand description in $parent\n"
|
||||
rc=1
|
||||
fi
|
||||
done
|
||||
|
||||
# Helper function: compares man page synopsis vs --help usage message
|
||||
function compare_usage() {
|
||||
local cmd="$1"
|
||||
local from_man="$2"
|
||||
|
||||
# Run 'cmd --help', grab the line immediately after 'Usage:'
|
||||
local help_output=$(../bin/$cmd --help)
|
||||
local from_help=$(echo "$help_output" | grep -A1 '^Usage:' | tail -1)
|
||||
|
||||
# strip off command name from both
|
||||
from_man=$(sed -E -e "s/\*\*$cmd\*\*[[:space:]]*//" <<<"$from_man")
|
||||
from_help=$(sed -E -e "s/^[[:space:]]*$cmd[[:space:]]*//" <<<"$from_help")
|
||||
|
||||
# man page lists 'foo [*options*]', help msg shows 'foo [command options]'.
|
||||
# Make sure if one has it, the other does too.
|
||||
if expr "$from_man" : "\[\*options\*\]" >/dev/null; then
|
||||
if expr "$from_help" : "\[command options\]" >/dev/null; then
|
||||
:
|
||||
else
|
||||
echo "WARNING: $cmd: man page shows '[*options*]', help does not show [command options]"
|
||||
rc=1
|
||||
fi
|
||||
elif expr "$from_help" : "\[command options\]" >/dev/null; then
|
||||
echo "WARNING: $cmd: --help shows [command options], man page does not show [*options*]"
|
||||
rc=1
|
||||
fi
|
||||
|
||||
# Strip off options and flags; start comparing arguments
|
||||
from_man=$(sed -E -e 's/^\[\*options\*\][[:space:]]*//' <<<"$from_man")
|
||||
from_help=$(sed -E -e 's/^\[command options\][[:space:]]*//' <<<"$from_help")
|
||||
|
||||
# Constant strings in man page are '**foo**', in --help are 'foo'.
|
||||
from_man=$(sed -E -e 's/\*\*([^*]+)\*\*/\1/g' <<<"$from_man")
|
||||
|
||||
# Args in man page are '_foo_', in --help are 'FOO'. Convert all to
|
||||
# UPCASE simply because it stands out better to the eye.
|
||||
from_man=$(sed -E -e 's/_([a-z-]+)_/\U\1/g' <<<"$from_man")
|
||||
|
||||
# Compare man-page and --help usage strings. Skip 'skopeo' itself,
|
||||
# because the man page includes '[global options]' which we don't grok.
|
||||
if [[ "$from_man" != "$from_help" && "$cmd" != "skopeo" ]]; then
|
||||
printf "%-25s man='%s' help='%s'\n" "$cmd:" "$from_man" "$from_help"
|
||||
rc=1
|
||||
fi
|
||||
}
|
||||
|
||||
# Pass 3: compare synopses.
|
||||
#
|
||||
# Make sure the SYNOPSIS line in skopeo-foo.1.md reads '**skopeo foo** ...'
|
||||
for md in *.1.md;do
|
||||
synopsis=$(egrep -A1 '^#* SYNOPSIS' $md|tail -1)
|
||||
|
||||
# Command name must be bracketed by double asterisks; options and
|
||||
# arguments are bracketed by single ones.
|
||||
# E.g. '**skopeo copy** [*options*] _..._'
|
||||
# Get the command name, and confirm that it matches the md file name.
|
||||
cmd=$(echo "$synopsis" | sed -E -e 's/^\*\*([^*]+)\*\*.*/\1/' | tr -d \*)
|
||||
# Use sed, not tr, so we only replace the first dash: we want
|
||||
# skopeo-list-tags -> "skopeo list-tags", not "skopeo list tags"
|
||||
md_nodash=$(basename "$md" .1.md | sed -e 's/-/ /')
|
||||
if [ "$cmd" != "$md_nodash" ]; then
|
||||
echo
|
||||
printf "Inconsistent program name in SYNOPSIS in %s:\n" $md
|
||||
printf " SYNOPSIS = %s (expected: '%s')\n" "$cmd" "$md_nodash"
|
||||
rc=1
|
||||
fi
|
||||
|
||||
# The convention is to use UPPER CASE in 'skopeo foo --help',
|
||||
# but *lower case bracketed by asterisks* in the man page
|
||||
if expr "$synopsis" : ".*[A-Z]" >/dev/null; then
|
||||
echo
|
||||
printf "Inconsistent capitalization in SYNOPSIS in %s\n" $md
|
||||
printf " '%s' should not contain upper-case characters\n" "$synopsis"
|
||||
rc=1
|
||||
fi
|
||||
|
||||
# (for debugging, and getting a sense of standard conventions)
|
||||
#printf " %-32s ------ '%s'\n" $md "$synopsis"
|
||||
|
||||
# If bin/skopeo is available, run "cmd --help" and compare Usage
|
||||
# messages. This is complicated, so do it in a helper function.
|
||||
compare_usage "$md_nodash" "$synopsis"
|
||||
done
|
||||
|
||||
|
||||
exit $rc
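A minimal way to run this checker locally, assuming `bin/skopeo` has been built so the pass-3 `--help` comparison can execute:

```bash
make bin/skopeo
hack/man-page-checker --verbose
```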
|
||||
@@ -1,4 +0,0 @@
|
||||
#!/bin/bash
|
||||
if pkg-config ostree-1 2> /dev/null ; then
|
||||
echo containers_image_ostree
|
||||
fi
|
||||
@@ -1,17 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
export GOPATH=$(pwd)/_gopath
|
||||
export PATH=$GOPATH/bin:$PATH
|
||||
|
||||
_containers="${GOPATH}/src/github.com/containers"
|
||||
mkdir -vp ${_containers}
|
||||
ln -vsf $(pwd) ${_containers}/skopeo
|
||||
|
||||
go version
|
||||
GO111MODULE=off go get -u github.com/cpuguy83/go-md2man golang.org/x/lint/golint
|
||||
|
||||
cd ${_containers}/skopeo
|
||||
make validate-local test-unit-local binary-local
|
||||
sudo make install
|
||||
skopeo -v
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
STATUS=$(git status --porcelain)
|
||||
|
||||
hack/xref-helpmsgs-manpages (new executable file, 277 lines)
@@ -0,0 +1,277 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
# xref-helpmsgs-manpages - cross-reference --help options against man pages
|
||||
#
|
||||
package LibPod::CI::XrefHelpmsgsManpages;
|
||||
|
||||
use v5.14;
|
||||
use utf8;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
(our $ME = $0) =~ s|.*/||;
|
||||
our $VERSION = '0.1';
|
||||
|
||||
# For debugging, show data structures using DumpTree($var)
|
||||
#use Data::TreeDumper; $Data::TreeDumper::Displayaddress = 0;
|
||||
|
||||
# unbuffer output
|
||||
$| = 1;
|
||||
|
||||
###############################################################################
|
||||
# BEGIN user-customizable section
|
||||
|
||||
# Path to skopeo executable
|
||||
my $Default_Skopeo = './bin/skopeo';
|
||||
my $SKOPEO = $ENV{SKOPEO} || $Default_Skopeo;
|
||||
|
||||
# Path to all doc files (markdown)
|
||||
my $Docs_Path = 'docs';
|
||||
|
||||
# Global error count
|
||||
my $Errs = 0;
|
||||
|
||||
# END user-customizable section
|
||||
###############################################################################
|
||||
|
||||
###############################################################################
|
||||
# BEGIN boilerplate args checking, usage messages
|
||||
|
||||
sub usage {
|
||||
print <<"END_USAGE";
|
||||
Usage: $ME [OPTIONS]
|
||||
|
||||
$ME recursively runs 'skopeo --help' against
|
||||
all subcommands; and recursively reads skopeo-*.1.md files
|
||||
in $Docs_Path, then cross-references that each --help
|
||||
option is listed in the appropriate man page and vice-versa.
|
||||
|
||||
$ME invokes '\$SKOPEO' (default: $Default_Skopeo).
|
||||
|
||||
Exit status is zero if no inconsistencies found, one otherwise
|
||||
|
||||
OPTIONS:
|
||||
|
||||
-v, --verbose show verbose progress indicators
|
||||
-n, --dry-run make no actual changes
|
||||
|
||||
--help display this message
|
||||
--version display program name and version
|
||||
END_USAGE
|
||||
|
||||
exit;
|
||||
}
|
||||
|
||||
# Command-line options. Note that this operates directly on @ARGV !
|
||||
our $debug = 0;
|
||||
our $verbose = 0;
|
||||
sub handle_opts {
|
||||
use Getopt::Long;
|
||||
GetOptions(
|
||||
'debug!' => \$debug,
|
||||
'verbose|v' => \$verbose,
|
||||
|
||||
help => \&usage,
|
||||
version => sub { print "$ME version $VERSION\n"; exit 0 },
|
||||
) or die "Try `$ME --help' for help\n";
|
||||
}
|
||||
|
||||
# END boilerplate args checking, usage messages
|
||||
###############################################################################
|
||||
|
||||
############################## CODE BEGINS HERE ###############################
|
||||
|
||||
# The term is "modulino".
|
||||
__PACKAGE__->main() unless caller();
|
||||
|
||||
# Main code.
|
||||
sub main {
|
||||
# Note that we operate directly on @ARGV, not on function parameters.
|
||||
# This is deliberate: it's because Getopt::Long only operates on @ARGV
|
||||
# and there's no clean way to make it use @_.
|
||||
handle_opts(); # will set package globals
|
||||
|
||||
# Fetch command-line arguments. Barf if too many.
|
||||
die "$ME: Too many arguments; try $ME --help\n" if @ARGV;
|
||||
|
||||
my $help = skopeo_help();
|
||||
my $man = skopeo_man('skopeo');
|
||||
|
||||
xref_by_help($help, $man);
|
||||
xref_by_man($help, $man);
|
||||
|
||||
exit !!$Errs;
|
||||
}
|
||||
|
||||
###############################################################################
|
||||
# BEGIN cross-referencing
|
||||
|
||||
##################
|
||||
# xref_by_help # Find keys in '--help' but not in man
|
||||
##################
|
||||
sub xref_by_help {
|
||||
my ($help, $man, @subcommand) = @_;
|
||||
|
||||
for my $k (sort keys %$help) {
|
||||
if (exists $man->{$k}) {
|
||||
if (ref $help->{$k}) {
|
||||
xref_by_help($help->{$k}, $man->{$k}, @subcommand, $k);
|
||||
}
|
||||
# Otherwise, non-ref is leaf node such as a --option
|
||||
}
|
||||
else {
|
||||
my $man = $man->{_path} || 'man';
|
||||
warn "$ME: skopeo @subcommand --help lists $k, but $k not in $man\n";
|
||||
++$Errs;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#################
|
||||
# xref_by_man # Find keys in man pages but not in --help
|
||||
#################
|
||||
#
|
||||
# In an ideal world we could share the functionality in one function; but
|
||||
# there are just too many special cases in man pages.
|
||||
#
|
||||
sub xref_by_man {
|
||||
my ($help, $man, @subcommand) = @_;
|
||||
|
||||
# FIXME: this generates way too much output
|
||||
for my $k (grep { $_ ne '_path' } sort keys %$man) {
|
||||
if (exists $help->{$k}) {
|
||||
if (ref $man->{$k}) {
|
||||
xref_by_man($help->{$k}, $man->{$k}, @subcommand, $k);
|
||||
}
|
||||
}
|
||||
elsif ($k ne '--help' && $k ne '-h') {
|
||||
my $man = $man->{_path} || 'man';
|
||||
|
||||
warn "$ME: skopeo @subcommand: $k in $man, but not --help\n";
|
||||
++$Errs;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# END cross-referencing
|
||||
###############################################################################
|
||||
# BEGIN data gathering
|
||||
|
||||
#################
|
||||
# skopeo_help # Parse output of 'skopeo [subcommand] --help'
|
||||
#################
|
||||
sub skopeo_help {
|
||||
my %help;
|
||||
open my $fh, '-|', $SKOPEO, @_, '--help'
|
||||
or die "$ME: Cannot fork: $!\n";
|
||||
my $section = '';
|
||||
while (my $line = <$fh>) {
|
||||
# Cobra is blessedly consistent in its output:
|
||||
# Usage: ...
|
||||
# Available Commands:
|
||||
# ....
|
||||
# Options:
|
||||
# ....
|
||||
#
|
||||
# Start by identifying the section we're in...
|
||||
if ($line =~ /^Available\s+(Commands):/) {
|
||||
$section = lc $1;
|
||||
}
|
||||
elsif ($line =~ /^(Flags):/) {
|
||||
$section = lc $1;
|
||||
}
|
||||
|
||||
# ...then track commands and options. For subcommands, recurse.
|
||||
elsif ($section eq 'commands') {
|
||||
if ($line =~ /^\s{1,4}(\S+)\s/) {
|
||||
my $subcommand = $1;
|
||||
print "> skopeo @_ $subcommand\n" if $debug;
|
||||
$help{$subcommand} = skopeo_help(@_, $subcommand)
|
||||
unless $subcommand eq 'help'; # 'help' not in man
|
||||
}
|
||||
}
|
||||
elsif ($section eq 'flags') {
|
||||
# Handle '--foo' or '-f, --foo'
|
||||
if ($line =~ /^\s{1,10}(--\S+)\s/) {
|
||||
print "> skopeo @_ $1\n" if $debug;
|
||||
$help{$1} = 1;
|
||||
}
|
||||
elsif ($line =~ /^\s{1,10}(-\S),\s+(--\S+)\s/) {
|
||||
print "> skopeo @_ $1, $2\n" if $debug;
|
||||
$help{$1} = $help{$2} = 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
close $fh
|
||||
or die "$ME: Error running 'skopeo @_ --help'\n";
|
||||
|
||||
return \%help;
|
||||
}
|
||||
|
||||
|
||||
################
|
||||
# skopeo_man # Parse contents of skopeo-*.1.md
|
||||
################
|
||||
sub skopeo_man {
|
||||
my $command = shift;
|
||||
my $manpath = "$Docs_Path/$command.1.md";
|
||||
print "** $manpath \n" if $debug;
|
||||
|
||||
my %man = (_path => $manpath);
|
||||
open my $fh, '<', $manpath
|
||||
or die "$ME: Cannot read $manpath: $!\n";
|
||||
my $section = '';
|
||||
my @most_recent_flags;
|
||||
my $previous_subcmd = '';
|
||||
while (my $line = <$fh>) {
|
||||
chomp $line;
|
||||
next unless $line; # skip empty lines
|
||||
|
||||
# .md files designate sections with leading double hash
|
||||
if ($line =~ /^##\s*OPTIONS/) {
|
||||
$section = 'flags';
|
||||
}
|
||||
elsif ($line =~ /^\#\#\s+(SUB)?COMMANDS/) {
|
||||
$section = 'commands';
|
||||
}
|
||||
elsif ($line =~ /^\#\#[^#]/) {
|
||||
$section = '';
|
||||
}
|
||||
|
||||
# This will be a table containing subcommand names, links to man pages.
|
||||
elsif ($section eq 'commands') {
|
||||
# In skopeo.1.md
|
||||
if ($line =~ /^\|\s*\[skopeo-(\S+?)\(\d\)\]/) {
|
||||
# $1 will be changed by recursion _*BEFORE*_ left-hand assignment
|
||||
my $subcmd = $1;
|
||||
$man{$subcmd} = skopeo_man("skopeo-$1");
|
||||
}
|
||||
}
|
||||
|
||||
# Options should always be of the form '**-f**' or '**\-\-flag**',
|
||||
# possibly separated by comma-space.
|
||||
elsif ($section eq 'flags') {
|
||||
# If option has long and short form, long must come first.
|
||||
# This is a while-loop because there may be multiple long
|
||||
# option names (not in skopeo ATM, but leave the possibility open)
|
||||
while ($line =~ s/^\*\*(--[a-z0-9.-]+)\*\*(=\*[a-zA-Z0-9-]+\*)?(,\s+)?//g) {
|
||||
$man{$1} = 1;
|
||||
}
|
||||
# Short form
|
||||
if ($line =~ s/^\*\*(-[a-zA-Z0-9.])\*\*(=\*[a-zA-Z0-9-]+\*)?//g) {
|
||||
$man{$1} = 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
close $fh;
|
||||
|
||||
return \%man;
|
||||
}
|
||||
|
||||
|
||||
|
||||
# END data gathering
|
||||
###############################################################################
|
||||
|
||||
1;
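A minimal invocation sketch; the `SKOPEO` override mirrors the `$ENV{SKOPEO}` fallback above:

```bash
make bin/skopeo
SKOPEO=./bin/skopeo hack/xref-helpmsgs-manpages
```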
|
||||
install.md (new file, 266 lines)
@@ -0,0 +1,266 @@
|
||||
# Installing Skopeo
|
||||
|
||||
## Distribution Packages
|
||||
`skopeo` may already be packaged in your distribution. This document lists the
|
||||
installation steps for many distros, along with package information and support links.
|
||||
|
||||
### Fedora
|
||||
|
||||
```sh
|
||||
sudo dnf -y install skopeo
|
||||
```
|
||||
|
||||
[Package Info](https://src.fedoraproject.org/rpms/skopeo) and
|
||||
[Bugzilla](https://bugzilla.redhat.com/buglist.cgi?bug_status=__open__&classification=Fedora&component=skopeo&product=Fedora)
|
||||
|
||||
Fedora bugs can be reported on the Skopeo GitHub [Issues](https://github.com/containers/skopeo/issues) page.
|
||||
|
||||
### RHEL / CentOS Stream ≥ 8
|
||||
|
||||
```sh
|
||||
sudo dnf -y install skopeo
|
||||
```
|
||||
|
||||
If you are a RHEL customer, please reach out through the official RHEL support
|
||||
channels for any issues.
|
||||
|
||||
CentOS Stream 9: [Package Info](https://gitlab.com/redhat/centos-stream/rpms/skopeo/-/tree/c9s) and
|
||||
[Bugzilla](https://bugzilla.redhat.com/buglist.cgi?bug_status=__open__&classification=Red%20Hat&component=skopeo&product=Red%20Hat%20Enterprise%20Linux%209&version=CentOS%20Stream)
|
||||
|
||||
CentOS Stream 8: [Package Info](https://git.centos.org/rpms/skopeo/tree/c8s-stream-rhel8) and
|
||||
[Bugzilla](https://bugzilla.redhat.com/buglist.cgi?bug_status=__open__&classification=Red%20Hat&component=skopeo&product=Red%20Hat%20Enterprise%20Linux%208&version=CentOS%20Stream)
|
||||
|
||||
|
||||
### RHEL/CentOS ≤ 7.x
|
||||
|
||||
```sh
|
||||
sudo yum -y install skopeo
|
||||
```
|
||||
|
||||
CentOS 7: [Package Repo](https://git.centos.org/rpms/skopeo/tree/c7-extras)
|
||||
|
||||
### openSUSE
|
||||
|
||||
```sh
|
||||
sudo zypper install skopeo
|
||||
```
|
||||
|
||||
[Package Info](https://software.opensuse.org/package/skopeo)
|
||||
|
||||
### Alpine
|
||||
|
||||
```sh
|
||||
sudo apk add skopeo
|
||||
```
|
||||
|
||||
[Package Info](https://pkgs.alpinelinux.org/packages?name=skopeo)
|
||||
|
||||
### macOS
|
||||
|
||||
```sh
|
||||
brew install skopeo
|
||||
```
|
||||
|
||||
### Nix / NixOS
|
||||
```sh
|
||||
$ nix-env -i skopeo
|
||||
```
|
||||
|
||||
[Package Info](https://search.nixos.org/packages?&show=skopeo&query=skopeo)
|
||||
|
||||
### Debian
|
||||
|
||||
The skopeo package is available on [Bullseye](https://packages.debian.org/bullseye/skopeo),
|
||||
and Debian Testing and Unstable.
|
||||
|
||||
```bash
|
||||
# Debian Bullseye, Testing or Unstable/Sid
|
||||
sudo apt-get update
|
||||
sudo apt-get -y install skopeo
|
||||
```
|
||||
|
||||
[Package Info](https://packages.debian.org/stable/skopeo)
|
||||
|
||||
### Raspberry Pi OS arm64 (beta)
|
||||
|
||||
Raspberry Pi OS uses the standard Debian repositories,
|
||||
so it is fully compatible with Debian's arm64 repository.
|
||||
You can simply follow the [steps for Debian](#debian) to install Skopeo.
|
||||
|
||||
|
||||
### Ubuntu
|
||||
|
||||
The skopeo package is available in the official repositories for Ubuntu 20.10
|
||||
and newer.
|
||||
|
||||
```bash
|
||||
# Ubuntu 20.10 and newer
|
||||
sudo apt-get -y update
|
||||
sudo apt-get -y install skopeo
|
||||
```
|
||||
|
||||
[Package Info](https://packages.ubuntu.com/jammy/skopeo)
|
||||
|
||||
### Windows
|
||||
Skopeo has not yet been packaged for Windows. There is an [open feature
|
||||
request](https://github.com/containers/skopeo/issues/715) and contributions are
|
||||
always welcome.
|
||||
|
||||
|
||||
## Container Images
|
||||
|
||||
Skopeo container images are available at `quay.io/skopeo/stable:latest`.
|
||||
For example,
|
||||
|
||||
```bash
|
||||
podman run docker://quay.io/skopeo/stable:latest copy --help
|
||||
```
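Another illustrative use of the containerized binary, inspecting a remote image without installing skopeo on the host (the target image is an arbitrary example):

```bash
podman run quay.io/skopeo/stable:latest inspect docker://docker.io/library/alpine:latest
```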
|
||||
|
||||
[Read more](./contrib/skopeoimage/README.md).
|
||||
|
||||
|
||||
## Building from Source
|
||||
|
||||
If `skopeo` is not already packaged for your system, read on for building and installing it from source:
|
||||
|
||||
To build the `skopeo` binary you need at least Go 1.17 (the minimum version declared in `go.mod`).
|
||||
|
||||
There are two ways to build skopeo: in a container, or locally without a
|
||||
container. Choose the one which better matches your needs and environment.
|
||||
|
||||
### Building without a container
|
||||
|
||||
Building without a container requires a bit more manual work and setup in your
|
||||
environment, but it is more flexible:
|
||||
|
||||
- It should work in more environments (e.g. for native macOS builds)
|
||||
- It does not require root privileges (after dependencies are installed)
|
||||
- It is faster, therefore more convenient for developing `skopeo`.
|
||||
|
||||
Install the necessary dependencies:
|
||||
|
||||
```bash
|
||||
# Fedora:
|
||||
sudo dnf install gpgme-devel libassuan-devel btrfs-progs-devel device-mapper-devel
|
||||
```
|
||||
|
||||
```bash
|
||||
# Ubuntu (`libbtrfs-dev` requires Ubuntu 18.10 and above):
|
||||
sudo apt install libgpgme-dev libassuan-dev libbtrfs-dev libdevmapper-dev pkg-config
|
||||
```
|
||||
|
||||
```bash
|
||||
# macOS:
|
||||
brew install gpgme
|
||||
```
|
||||
|
||||
```bash
|
||||
# openSUSE:
|
||||
sudo zypper install libgpgme-devel device-mapper-devel libbtrfs-devel glib2-devel
|
||||
```
|
||||
|
||||
Make sure to clone this repository in your `GOPATH` - otherwise compilation fails.
|
||||
|
||||
```bash
|
||||
git clone https://github.com/containers/skopeo $GOPATH/src/github.com/containers/skopeo
|
||||
cd $GOPATH/src/github.com/containers/skopeo && make bin/skopeo
|
||||
```
|
||||
|
||||
By default, the `make` command (i.e. `make all`) builds `bin/skopeo` and the documentation locally.
|
||||
|
||||
Building of documentation requires `go-md2man`. On systems that do not have this tool, the
|
||||
document generation can be skipped by passing `DISABLE_DOCS=1`:
|
||||
```
|
||||
DISABLE_DOCS=1 make
|
||||
```
|
||||
|
||||
### Building documentation
|
||||
|
||||
To build the manual you will need go-md2man.
|
||||
|
||||
```bash
|
||||
# Debian:
|
||||
sudo apt-get install go-md2man
|
||||
```
|
||||
|
||||
```
|
||||
# Fedora:
|
||||
sudo dnf install go-md2man
|
||||
```
|
||||
|
||||
```
|
||||
# MacOS:
|
||||
brew install go-md2man
|
||||
```
|
||||
|
||||
Then
|
||||
|
||||
```bash
|
||||
make docs
|
||||
```
|
||||
|
||||
### Building in a container
|
||||
|
||||
Building in a container is simpler, but more restrictive:
|
||||
|
||||
- It requires the `podman` command and the ability to run Linux containers.
|
||||
- The created executable is a Linux executable, and depends on dynamic libraries
|
||||
which may only be available in a container of a similar Linux
|
||||
distribution.
|
||||
|
||||
```bash
|
||||
$ make binary
|
||||
```
|
||||
|
||||
### Shell completion scripts
|
||||
|
||||
Skopeo has shell completion scripts for bash, zsh, fish and powershell. They are installed as part of `make install`.
|
||||
You may have to restart your shell in order for them to take effect.
|
||||
|
||||
For instructions to manually generate and load the scripts please see `skopeo completion --help`.
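For example, generating and loading the bash script by hand might look like this; the destination path is only one common choice:

```bash
skopeo completion bash > ~/.local/share/bash-completion/completions/skopeo
source ~/.local/share/bash-completion/completions/skopeo
```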
|
||||
|
||||
### Installation
|
||||
|
||||
Finally, after the binary and documentation are built:
|
||||
|
||||
```bash
|
||||
sudo make install
|
||||
```
|
||||
|
||||
### Building a static binary
|
||||
|
||||
There have been efforts in the past to produce and maintain static builds, but the maintainers prefer to run Skopeo using distro packages or within containers. This is because static builds of Skopeo tend to be unreliable and functionally restricted. Specifically:
|
||||
- Some features of Skopeo depend on non-Go libraries like `libgpgme` and `libdevmapper`.
|
||||
- Generating static Go binaries uses native Go libraries, which don't support e.g. `.local` or LDAP-based name resolution.
|
||||
|
||||
That being said, if you would like to build Skopeo statically, you might be able to do it by combining all the following steps.
|
||||
- Export environment variable `CGO_ENABLED=0` (disabling CGO causes Go to prefer native libraries when possible, instead of dynamically linking against system libraries).
|
||||
- Set the `BUILDTAGS=containers_image_openpgp` Make variable (this removes the dependency on `libgpgme` and its companion libraries).
|
||||
- Clear the `GO_DYN_FLAGS` Make variable (which otherwise seems to force the creation of a dynamic executable).
|
||||
|
||||
The following command implements these steps to produce a static binary in the `bin` subdirectory of the repository:
|
||||
|
||||
```bash
|
||||
docker run -v $PWD:/src -w /src -e CGO_ENABLED=0 golang \
|
||||
make BUILDTAGS=containers_image_openpgp GO_DYN_FLAGS=
|
||||
```
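If you do try this, a quick sanity check that the result is really static (assuming a Linux host with `file` installed):

```bash
file bin/skopeo   # expect "statically linked"
ldd bin/skopeo    # expect "not a dynamic executable"
```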
|
||||
|
||||
Keep in mind that the resulting binary is unsupported and might crash randomly. Only use if you know what you're doing!
|
||||
|
||||
For more information, history, and context about static builds, check the following issues:
|
||||
|
||||
- [#391] - Consider distributing statically built binaries as part of release
|
||||
- [#669] - Static build fails with segmentation violation
|
||||
- [#670] - Fixing static binary build using container
|
||||
- [#755] - Remove static and in-container targets from Makefile
|
||||
- [#932] - Add nix derivation for static builds
|
||||
- [#1336] - Unable to run skopeo on Fedora 30 (due to dyn lib dependency)
|
||||
- [#1478] - Publish binary releases to GitHub (request+discussion)
|
||||
|
||||
[#391]: https://github.com/containers/skopeo/issues/391
|
||||
[#669]: https://github.com/containers/skopeo/issues/669
|
||||
[#670]: https://github.com/containers/skopeo/issues/670
|
||||
[#755]: https://github.com/containers/skopeo/issues/755
|
||||
[#932]: https://github.com/containers/skopeo/issues/932
|
||||
[#1336]: https://github.com/containers/skopeo/issues/1336
|
||||
[#1478]: https://github.com/containers/skopeo/issues/1478
|
||||
@@ -1,7 +1,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/go-check/check"
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
const blockedRegistriesConf = "./fixtures/blocked-registries.conf"
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/containers/skopeo/version"
|
||||
"github.com/go-check/check"
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -30,25 +30,18 @@ type SkopeoSuite struct {
|
||||
func (s *SkopeoSuite) SetUpSuite(c *check.C) {
|
||||
_, err := exec.LookPath(skopeoBinary)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TearDownSuite(c *check.C) {
|
||||
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) SetUpTest(c *check.C) {
|
||||
s.regV2 = setupRegistryV2At(c, privateRegistryURL0, false, false)
|
||||
s.regV2WithAuth = setupRegistryV2At(c, privateRegistryURL1, true, false)
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TearDownTest(c *check.C) {
|
||||
func (s *SkopeoSuite) TearDownSuite(c *check.C) {
|
||||
if s.regV2 != nil {
|
||||
s.regV2.Close()
|
||||
s.regV2.tearDown(c)
|
||||
}
|
||||
if s.regV2WithAuth != nil {
|
||||
//cmd := exec.Command("docker", "logout", s.regV2WithAuth)
|
||||
//c.Assert(cmd.Run(), check.IsNil)
|
||||
s.regV2WithAuth.Close()
|
||||
s.regV2WithAuth.tearDown(c)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -71,7 +64,7 @@ func (s *SkopeoSuite) TestNeedAuthToPrivateRegistryV2WithoutDockerCfg(c *check.C
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestCertDirInsteadOfCertPath(c *check.C) {
|
||||
wanted := ".*flag provided but not defined: -cert-path.*"
|
||||
wanted := ".*unknown flag: --cert-path.*"
|
||||
assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "--cert-path=/")
|
||||
wanted = ".*unauthorized: authentication required.*"
|
||||
assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "--cert-dir=/etc/docker/certs.d/")
|
||||
@@ -91,3 +84,30 @@ func (s *SkopeoSuite) TestNoNeedAuthToPrivateRegistryV2ImageNotFound(c *check.C)
|
||||
func (s *SkopeoSuite) TestInspectFailsWhenReferenceIsInvalid(c *check.C) {
|
||||
assertSkopeoFails(c, `.*Invalid image name.*`, "inspect", "unknown")
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestLoginLogout(c *check.C) {
|
||||
wanted := "^Login Succeeded!\n$"
|
||||
assertSkopeoSucceeds(c, wanted, "login", "--tls-verify=false", "--username="+s.regV2WithAuth.username, "--password="+s.regV2WithAuth.password, s.regV2WithAuth.url)
|
||||
// test --get-login returns username
|
||||
wanted = fmt.Sprintf("^%s\n$", s.regV2WithAuth.username)
|
||||
assertSkopeoSucceeds(c, wanted, "login", "--tls-verify=false", "--get-login", s.regV2WithAuth.url)
|
||||
// test logout
|
||||
wanted = fmt.Sprintf("^Removed login credentials for %s\n$", s.regV2WithAuth.url)
|
||||
assertSkopeoSucceeds(c, wanted, "logout", s.regV2WithAuth.url)
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestCopyWithLocalAuth(c *check.C) {
|
||||
wanted := "^Login Succeeded!\n$"
|
||||
assertSkopeoSucceeds(c, wanted, "login", "--tls-verify=false", "--username="+s.regV2WithAuth.username, "--password="+s.regV2WithAuth.password, s.regV2WithAuth.url)
|
||||
// copy to private registry using local authentication
|
||||
imageName := fmt.Sprintf("docker://%s/busybox:mine", s.regV2WithAuth.url)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", testFQIN+":latest", imageName)
|
||||
// inspect from private registry
|
||||
assertSkopeoSucceeds(c, "", "inspect", "--tls-verify=false", imageName)
|
||||
// logout from the registry
|
||||
wanted = fmt.Sprintf("^Removed login credentials for %s\n$", s.regV2WithAuth.url)
|
||||
assertSkopeoSucceeds(c, wanted, "logout", s.regV2WithAuth.url)
|
||||
// inspect from private registry should fail after logout
|
||||
wanted = ".*unauthorized: authentication required.*"
|
||||
assertSkopeoFails(c, wanted, "inspect", "--tls-verify=false", imageName)
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
integration/decompress-dirs.sh (new executable file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
#!/bin/bash -e
|
||||
# Account for differences between dir: images that are solely due to one being
|
||||
# compressed (fresh from a registry) and the other not being compressed (read
|
||||
# from storage, which decompressed it and had to reassemble the layer blobs).
|
||||
for dir in "$@" ; do
|
||||
# Updating the manifest's blob digests may change the formatting, so
|
||||
# use jq to get them into similar shape.
|
||||
jq -M . "${dir}"/manifest.json > "${dir}"/manifest.json.tmp && mv "${dir}"/manifest.json.tmp "${dir}"/manifest.json
|
||||
for candidate in "${dir}"/???????????????????????????????????????????????????????????????? ; do
|
||||
# If a digest-identified file looks like it was compressed,
|
||||
# decompress it, and replace its hash and size in the manifest
|
||||
# with the values for their decompressed versions.
|
||||
uncompressed=`zcat "${candidate}" 2> /dev/null | sha256sum | cut -c1-64`
|
||||
if test $? -eq 0 ; then
|
||||
if test "$uncompressed" != e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 ; then
|
||||
zcat "${candidate}" > "${dir}"/${uncompressed}
|
||||
sed -r -i -e "s#sha256:$(basename ${candidate})#sha256:${uncompressed}#g" "${dir}"/manifest.json
|
||||
sed -r -i -e "s#\"size\": $(wc -c < ${candidate}),#\"size\": $(wc -c < ${dir}/${uncompressed}),#g" "${dir}"/manifest.json
|
||||
rm -f "${candidate}"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
done
|
||||
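For context, the integration helpers added later in this diff wrap this script in runDecompressDirs(), which simply shells out to it before two dir: layouts are compared. A minimal sketch of that call (the dir1/dir2 paths are placeholders):

	// Normalize both dir: images in place so that compression-only differences
	// disappear before the directories are diffed.
	out, err := exec.Command("./decompress-dirs.sh", dir1, dir2).CombinedOutput()
	if err != nil {
		c.Fatalf("decompress-dirs.sh failed: %v\n%s", err, out)
	}
	// The two layouts can now be compared with `diff -urN dir1 dir2`.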
@@ -20,10 +20,41 @@
|
||||
"keyPath": "@keydir@/personal-pubkey.gpg"
|
||||
}
|
||||
],
|
||||
"localhost:5006/myns/mirroring-primary": [
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "@keydir@/personal-pubkey.gpg"
|
||||
}
|
||||
],
|
||||
"localhost:5006/myns/mirroring-mirror": [
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "@keydir@/personal-pubkey.gpg"
|
||||
}
|
||||
],
|
||||
"localhost:5006/myns/mirroring-remap": [
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "@keydir@/personal-pubkey.gpg",
|
||||
"signedIdentity": {
|
||||
"type": "remapIdentity",
|
||||
"prefix": "localhost:5006/myns/mirroring-remap",
|
||||
"signedPrefix": "localhost:5006/myns/mirroring-primary"
|
||||
}
|
||||
}
|
||||
],
|
||||
"docker.io/openshift": [
|
||||
{
|
||||
"type": "insecureAcceptAnything"
|
||||
}
|
||||
],
|
||||
"quay.io/openshift": [
|
||||
{
|
||||
"type": "insecureAcceptAnything"
|
||||
}
|
||||
]
|
||||
},
|
||||
"dir": {
|
||||
|
||||
@@ -26,3 +26,9 @@ mirror = [
|
||||
{ location = "wrong-mirror-0.invalid" },
|
||||
{ location = "gcr.io/google-containers" },
|
||||
]
|
||||
|
||||
[[registry]]
|
||||
location = "localhost:5006/myns/mirroring-primary"
|
||||
mirror = [
|
||||
{ location = "localhost:5006/myns/mirroring-mirror"},
|
||||
]
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
docker:
|
||||
localhost:5555:
|
||||
sigstore: file://@sigstore@
|
||||
lookaside: file://@lookaside@
|
||||
localhost:5555/public:
|
||||
sigstore-staging: file://@split-staging@
|
||||
sigstore: @split-read@
|
||||
lookaside-staging: file://@split-staging@
|
||||
lookaside: @split-read@
|
||||
|
||||
@@ -2,16 +2,17 @@ package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/homedir"
|
||||
"github.com/go-check/check"
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
var adminKUBECONFIG = map[string]string{
|
||||
@@ -22,6 +23,7 @@ var adminKUBECONFIG = map[string]string{
|
||||
// running on localhost.
|
||||
type openshiftCluster struct {
|
||||
workingDir string
|
||||
dockerDir string
|
||||
processes []*exec.Cmd // Processes to terminate on teardown; append to the end, terminate from end to the start.
|
||||
}
|
||||
|
||||
@@ -30,10 +32,7 @@ type openshiftCluster struct {
|
||||
// in isolated test environment.
|
||||
func startOpenshiftCluster(c *check.C) *openshiftCluster {
|
||||
cluster := &openshiftCluster{}
|
||||
|
||||
dir, err := ioutil.TempDir("", "openshift-cluster")
|
||||
c.Assert(err, check.IsNil)
|
||||
cluster.workingDir = dir
|
||||
cluster.workingDir = c.MkDir()
|
||||
|
||||
cluster.startMaster(c)
|
||||
cluster.prepareRegistryConfig(c)
|
||||
@@ -61,6 +60,7 @@ func (cluster *openshiftCluster) startMaster(c *check.C) {
|
||||
cmd := cluster.clusterCmd(nil, "openshift", "start", "master")
|
||||
cluster.processes = append(cluster.processes, cmd)
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
c.Assert(err, check.IsNil)
|
||||
// Send both to the same pipe. This might cause the two streams to be mixed up,
|
||||
// but logging actually goes only to stderr - this primarily ensures we log any
|
||||
// unexpected output to stdout.
|
||||
@@ -107,6 +107,8 @@ func (cluster *openshiftCluster) startMaster(c *check.C) {
|
||||
|
||||
gotPortCheck := false
|
||||
gotLogCheck := false
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
|
||||
defer cancel()
|
||||
for !gotPortCheck || !gotLogCheck {
|
||||
c.Logf("Waiting for master")
|
||||
select {
|
||||
@@ -119,6 +121,8 @@ func (cluster *openshiftCluster) startMaster(c *check.C) {
|
||||
c.Fatal("log check done, success message not found")
|
||||
}
|
||||
gotLogCheck = true
|
||||
case <-ctx.Done():
|
||||
c.Fatalf("Timed out waiting for master: %v", ctx.Err())
|
||||
}
|
||||
}
|
||||
c.Logf("OK, master started!")
|
||||
@@ -164,8 +168,14 @@ func (cluster *openshiftCluster) startRegistryProcess(c *check.C, port int, conf
|
||||
terminatePortCheck <- true
|
||||
}()
|
||||
c.Logf("Waiting for registry to start")
|
||||
<-portOpen
|
||||
c.Logf("OK, Registry port open")
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
|
||||
defer cancel()
|
||||
select {
|
||||
case <-portOpen:
|
||||
c.Logf("OK, Registry port open")
|
||||
case <-ctx.Done():
|
||||
c.Fatalf("Timed out waiting for registry to start: %v", ctx.Err())
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
@@ -182,7 +192,7 @@ func (cluster *openshiftCluster) startRegistry(c *check.C) {
|
||||
// The default configuration currently already contains acceptschema2: false
|
||||
})
|
||||
// Make sure the configuration contains "acceptschema2: false", because eventually it will be enabled upstream and this function will need to be updated.
|
||||
configContents, err := ioutil.ReadFile(schema1Config)
|
||||
configContents, err := os.ReadFile(schema1Config)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(string(configContents), check.Matches, "(?s).*acceptschema2: false.*")
|
||||
cluster.processes = append(cluster.processes, cluster.startRegistryProcess(c, 5005, schema1Config))
|
||||
@@ -211,8 +221,8 @@ func (cluster *openshiftCluster) ocLoginToProject(c *check.C) {
|
||||
// dockerLogin simulates (docker login) to the cluster, or terminates on failure.
|
||||
// We do not run (docker login) directly, because that requires a running daemon and a docker package.
|
||||
func (cluster *openshiftCluster) dockerLogin(c *check.C) {
|
||||
dockerDir := filepath.Join(homedir.Get(), ".docker")
|
||||
err := os.Mkdir(dockerDir, 0700)
|
||||
cluster.dockerDir = filepath.Join(homedir.Get(), ".docker")
|
||||
err := os.Mkdir(cluster.dockerDir, 0700)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
out := combinedOutputOfCommand(c, "oc", "config", "view", "-o", "json", "-o", "jsonpath={.users[*].user.token}")
|
||||
@@ -226,7 +236,7 @@ func (cluster *openshiftCluster) dockerLogin(c *check.C) {
|
||||
}`, port, authValue))
|
||||
}
|
||||
configJSON := `{"auths": {` + strings.Join(auths, ",") + `}}`
|
||||
err = ioutil.WriteFile(filepath.Join(dockerDir, "config.json"), []byte(configJSON), 0600)
|
||||
err = os.WriteFile(filepath.Join(cluster.dockerDir, "config.json"), []byte(configJSON), 0600)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
@@ -244,9 +254,12 @@ func (cluster *openshiftCluster) relaxImageSignerPermissions(c *check.C) {
|
||||
// tearDown stops the cluster services and deletes (only some!) of the state.
|
||||
func (cluster *openshiftCluster) tearDown(c *check.C) {
|
||||
for i := len(cluster.processes) - 1; i >= 0; i-- {
|
||||
cluster.processes[i].Process.Kill()
|
||||
// It’s undocumented what Kill() returns if the process has terminated,
|
||||
// so we couldn’t check just for that. This is running in a container anyway…
|
||||
_ = cluster.processes[i].Process.Kill()
|
||||
}
|
||||
if cluster.workingDir != "" {
|
||||
os.RemoveAll(cluster.workingDir)
|
||||
if cluster.dockerDir != "" {
|
||||
err := os.RemoveAll(cluster.dockerDir)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
//go:build openshift_shell
|
||||
// +build openshift_shell
|
||||
|
||||
package main
|
||||
@@ -6,7 +7,7 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
|
||||
"github.com/go-check/check"
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
/*
|
||||
@@ -19,8 +20,8 @@ to start a container, then within the container:
|
||||
SKOPEO_CONTAINER_TESTS=1 PS1='nested> ' go test -tags openshift_shell -timeout=24h ./integration -v -check.v -check.vv -check.f='CopySuite.TestRunShell'
|
||||
|
||||
An example of what can be done within the container:
|
||||
cd ..; make binary-local install
|
||||
./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://busybox:latest atomic:localhost:5000/myns/personal:personal
|
||||
cd ..; make bin/skopeo PREFIX=/usr install
|
||||
./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://quay.io/libpod/busybox:latest atomic:localhost:5000/myns/personal:personal
|
||||
oc get istag personal:personal -o json
|
||||
curl -L -v 'http://localhost:5000/v2/'
|
||||
cat ~/.docker/config.json
|
||||
|
||||
12
integration/procutils.go
Normal file
@@ -0,0 +1,12 @@
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
// cmdLifecycleToParentIfPossible ties the command's lifetime to the parent process, so the command exits when the parent does (only effective on Linux)
|
||||
func cmdLifecycleToParentIfPossible(c *exec.Cmd) {
|
||||
}
|
||||
14
integration/procutils_linux.go
Normal file
@@ -0,0 +1,14 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// cmdLifecycleToParentIfPossible is a thin wrapper around prctl(PR_SET_PDEATHSIG)
|
||||
// on Linux.
|
||||
func cmdLifecycleToParentIfPossible(c *exec.Cmd) {
|
||||
c.SysProcAttr = &syscall.SysProcAttr{
|
||||
Pdeathsig: syscall.SIGTERM,
|
||||
}
|
||||
}
|
||||
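The proxy test added below applies this helper to the skopeo child process before starting it; a short usage sketch (error handling trimmed to the essentials):

	// Tie the child's lifetime to this process: on Linux the kernel sends it
	// SIGTERM if the test process exits first, so no stray proxy is left behind.
	cmd := exec.Command("skopeo", "experimental-image-proxy", "--sockfd", "3")
	cmdLifecycleToParentIfPossible(cmd) // no-op on non-Linux builds
	if err := cmd.Start(); err != nil {
		return err
	}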
310
integration/proxy_test.go
Normal file
@@ -0,0 +1,310 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"gopkg.in/check.v1"
|
||||
|
||||
"github.com/containers/image/v5/manifest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// This image is known to be x86_64 only right now
|
||||
const knownNotManifestListedImage_x8664 = "docker://quay.io/coreos/11bot"
|
||||
|
||||
const expectedProxySemverMajor = "0.2"
|
||||
|
||||
// request is copied from proxy.go
|
||||
// We intentionally copy to ensure that we catch any unexpected "API" changes
|
||||
// in the JSON.
|
||||
type request struct {
|
||||
// Method is the name of the function
|
||||
Method string `json:"method"`
|
||||
// Args is the arguments (parsed inside the function)
|
||||
Args []interface{} `json:"args"`
|
||||
}
|
||||
|
||||
// reply is copied from proxy.go
|
||||
type reply struct {
|
||||
// Success is true if and only if the call succeeded.
|
||||
Success bool `json:"success"`
|
||||
// Value is an arbitrary value (or values, as array/map) returned from the call.
|
||||
Value interface{} `json:"value"`
|
||||
// PipeID is an index into open pipes, and should be passed to FinishPipe
|
||||
PipeID uint32 `json:"pipeid"`
|
||||
// Error should be non-empty if Success == false
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
// maxMsgSize is also copied from proxy.go
|
||||
const maxMsgSize = 32 * 1024
|
||||
|
||||
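A concrete exchange makes the call helpers below easier to follow; the values here are illustrative, but the field names come from the request and reply structs above:

	// Request sent for OpenImage, marshalled with encoding/json:
	//   {"method":"OpenImage","args":["docker://quay.io/coreos/11bot"]}
	// Reply on success (value is the numeric image id used by later calls):
	//   {"success":true,"value":1,"pipeid":0,"error":""}
	// Calls that stream data (GetManifest, GetFullConfig, GetConfig) additionally
	// set pipeid and pass the read end of a pipe as an SCM_RIGHTS file descriptor;
	// the caller drains that fd and then calls FinishPipe with the same pipeid.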
type proxy struct {
|
||||
c *net.UnixConn
|
||||
}
|
||||
|
||||
type pipefd struct {
|
||||
// id is the remote identifier "pipeid"
|
||||
id uint
|
||||
fd *os.File
|
||||
}
|
||||
|
||||
func (p *proxy) call(method string, args []interface{}) (rval interface{}, fd *pipefd, err error) {
|
||||
req := request{
|
||||
Method: method,
|
||||
Args: args,
|
||||
}
|
||||
reqbuf, err := json.Marshal(&req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err := p.c.Write(reqbuf)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if n != len(reqbuf) {
|
||||
err = fmt.Errorf("short write during call of %d bytes", n)
|
||||
return
|
||||
}
|
||||
oob := make([]byte, syscall.CmsgSpace(1))
|
||||
replybuf := make([]byte, maxMsgSize)
|
||||
n, oobn, _, _, err := p.c.ReadMsgUnix(replybuf, oob)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("reading reply: %v", err)
|
||||
return
|
||||
}
|
||||
var reply reply
|
||||
err = json.Unmarshal(replybuf[0:n], &reply)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Failed to parse reply: %w", err)
|
||||
return
|
||||
}
|
||||
if !reply.Success {
|
||||
err = fmt.Errorf("remote error: %s", reply.Error)
|
||||
return
|
||||
}
|
||||
|
||||
if reply.PipeID > 0 {
|
||||
var scms []syscall.SocketControlMessage
|
||||
scms, err = syscall.ParseSocketControlMessage(oob[:oobn])
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to parse control message: %v", err)
|
||||
return
|
||||
}
|
||||
if len(scms) != 1 {
|
||||
err = fmt.Errorf("Expected 1 received fd, found %d", len(scms))
|
||||
return
|
||||
}
|
||||
var fds []int
|
||||
fds, err = syscall.ParseUnixRights(&scms[0])
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to parse unix rights: %v", err)
|
||||
return
|
||||
}
|
||||
fd = &pipefd{
|
||||
fd: os.NewFile(uintptr(fds[0]), "replyfd"),
|
||||
id: uint(reply.PipeID),
|
||||
}
|
||||
}
|
||||
|
||||
rval = reply.Value
|
||||
return
|
||||
}
|
||||
|
||||
func (p *proxy) callNoFd(method string, args []interface{}) (rval interface{}, err error) {
|
||||
var fd *pipefd
|
||||
rval, fd, err = p.call(method, args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if fd != nil {
|
||||
err = fmt.Errorf("Unexpected fd from method %s", method)
|
||||
return
|
||||
}
|
||||
return rval, nil
|
||||
}
|
||||
|
||||
func (p *proxy) callReadAllBytes(method string, args []interface{}) (rval interface{}, buf []byte, err error) {
|
||||
var fd *pipefd
|
||||
rval, fd, err = p.call(method, args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if fd == nil {
|
||||
err = fmt.Errorf("Expected fd from method %s", method)
|
||||
return
|
||||
}
|
||||
fetchchan := make(chan byteFetch)
|
||||
go func() {
|
||||
manifestBytes, err := io.ReadAll(fd.fd)
|
||||
fetchchan <- byteFetch{
|
||||
content: manifestBytes,
|
||||
err: err,
|
||||
}
|
||||
}()
|
||||
_, err = p.callNoFd("FinishPipe", []interface{}{fd.id})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case fetchRes := <-fetchchan:
|
||||
err = fetchRes.err
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
buf = fetchRes.content
|
||||
case <-time.After(5 * time.Minute):
|
||||
err = fmt.Errorf("timed out during proxy fetch")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func newProxy() (*proxy, error) {
|
||||
fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_SEQPACKET, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
myfd := os.NewFile(uintptr(fds[0]), "myfd")
|
||||
defer myfd.Close()
|
||||
theirfd := os.NewFile(uintptr(fds[1]), "theirfd")
|
||||
defer theirfd.Close()
|
||||
|
||||
mysock, err := net.FileConn(myfd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Note ExtraFiles starts at 3
|
||||
proc := exec.Command("skopeo", "experimental-image-proxy", "--sockfd", "3")
|
||||
proc.Stderr = os.Stderr
|
||||
cmdLifecycleToParentIfPossible(proc)
|
||||
proc.ExtraFiles = append(proc.ExtraFiles, theirfd)
|
||||
|
||||
if err = proc.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p := &proxy{
|
||||
c: mysock.(*net.UnixConn),
|
||||
}
|
||||
|
||||
v, err := p.callNoFd("Initialize", nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
semver, ok := v.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("proxy Initialize: Unexpected value %T", v)
|
||||
}
|
||||
if !strings.HasPrefix(semver, expectedProxySemverMajor) {
|
||||
return nil, fmt.Errorf("Unexpected semver %s", semver)
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
check.Suite(&ProxySuite{})
|
||||
}
|
||||
|
||||
type ProxySuite struct {
|
||||
}
|
||||
|
||||
func (s *ProxySuite) SetUpSuite(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *ProxySuite) TearDownSuite(c *check.C) {
|
||||
}
|
||||
|
||||
type byteFetch struct {
|
||||
content []byte
|
||||
err error
|
||||
}
|
||||
|
||||
func runTestGetManifestAndConfig(p *proxy, img string) error {
|
||||
v, err := p.callNoFd("OpenImage", []interface{}{knownNotManifestListedImage_x8664})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
imgidv, ok := v.(float64)
|
||||
if !ok {
|
||||
return fmt.Errorf("OpenImage return value is %T", v)
|
||||
}
|
||||
imgid := uint32(imgidv)
|
||||
|
||||
_, manifestBytes, err := p.callReadAllBytes("GetManifest", []interface{}{imgid})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = manifest.OCI1FromManifest(manifestBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, configBytes, err := p.callReadAllBytes("GetFullConfig", []interface{}{imgid})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var config imgspecv1.Image
|
||||
err = json.Unmarshal(configBytes, &config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate that the image config seems sane
|
||||
if config.Architecture == "" {
|
||||
return fmt.Errorf("No architecture found")
|
||||
}
|
||||
if len(config.Config.Cmd) == 0 && len(config.Config.Entrypoint) == 0 {
|
||||
return fmt.Errorf("No CMD or ENTRYPOINT set")
|
||||
}
|
||||
|
||||
// Also test this legacy interface
|
||||
_, ctrconfigBytes, err := p.callReadAllBytes("GetConfig", []interface{}{imgid})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var ctrconfig imgspecv1.ImageConfig
|
||||
err = json.Unmarshal(ctrconfigBytes, &ctrconfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate that the config seems sane
|
||||
if len(ctrconfig.Cmd) == 0 && len(ctrconfig.Entrypoint) == 0 {
|
||||
return fmt.Errorf("No CMD or ENTRYPOINT set")
|
||||
}
|
||||
|
||||
_, err = p.callNoFd("CloseImage", []interface{}{imgid})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *ProxySuite) TestProxy(c *check.C) {
|
||||
p, err := newProxy()
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
err = runTestGetManifestAndConfig(p, knownNotManifestListedImage_x8664)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Testing image %s: %v", knownNotManifestListedImage_x8664, err)
|
||||
}
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
err = runTestGetManifestAndConfig(p, knownListImage)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Testing image %s: %v", knownListImage, err)
|
||||
}
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
@@ -2,14 +2,13 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/go-check/check"
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -20,7 +19,6 @@ const (
|
||||
type testRegistryV2 struct {
|
||||
cmd *exec.Cmd
|
||||
url string
|
||||
dir string
|
||||
username string
|
||||
password string
|
||||
email string
|
||||
@@ -45,10 +43,7 @@ func setupRegistryV2At(c *check.C, url string, auth, schema1 bool) *testRegistry
|
||||
}
|
||||
|
||||
func newTestRegistryV2At(c *check.C, url string, auth, schema1 bool) (*testRegistryV2, error) {
|
||||
tmp, err := ioutil.TempDir("", "registry-test-")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tmp := c.MkDir()
|
||||
template := `version: 0.1
|
||||
loglevel: debug
|
||||
storage:
|
||||
@@ -58,6 +53,9 @@ storage:
|
||||
enabled: true
|
||||
http:
|
||||
addr: %s
|
||||
compatibility:
|
||||
schema1:
|
||||
enabled: true
|
||||
%s`
|
||||
var (
|
||||
htpasswd string
|
||||
@@ -71,7 +69,7 @@ http:
|
||||
username = "testuser"
|
||||
password = "testpassword"
|
||||
email = "test@test.org"
|
||||
if err := ioutil.WriteFile(htpasswdPath, []byte(userpasswd), os.FileMode(0644)); err != nil {
|
||||
if err := os.WriteFile(htpasswdPath, []byte(userpasswd), os.FileMode(0644)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
htpasswd = fmt.Sprintf(`auth:
|
||||
@@ -86,19 +84,18 @@ http:
|
||||
return nil, err
|
||||
}
|
||||
if _, err := fmt.Fprintf(config, template, tmp, url, htpasswd); err != nil {
|
||||
os.RemoveAll(tmp)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
binary := binaryV2
|
||||
var cmd *exec.Cmd
|
||||
if schema1 {
|
||||
binary = binaryV2Schema1
|
||||
cmd = exec.Command(binaryV2Schema1, confPath)
|
||||
} else {
|
||||
cmd = exec.Command(binaryV2, "serve", confPath)
|
||||
}
|
||||
|
||||
cmd := exec.Command(binary, confPath)
|
||||
consumeAndLogOutputs(c, fmt.Sprintf("registry-%s", url), cmd)
|
||||
if err := cmd.Start(); err != nil {
|
||||
os.RemoveAll(tmp)
|
||||
if os.IsNotExist(err) {
|
||||
c.Skip(err.Error())
|
||||
}
|
||||
@@ -107,7 +104,6 @@ http:
|
||||
return &testRegistryV2{
|
||||
cmd: cmd,
|
||||
url: url,
|
||||
dir: tmp,
|
||||
username: username,
|
||||
password: password,
|
||||
email: email,
|
||||
@@ -126,7 +122,8 @@ func (t *testRegistryV2) Ping() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *testRegistryV2) Close() {
|
||||
t.cmd.Process.Kill()
|
||||
os.RemoveAll(t.dir)
|
||||
func (t *testRegistryV2) tearDown(c *check.C) {
|
||||
// It’s undocumented what Kill() returns if the process has terminated,
|
||||
// so we couldn’t check just for that. This is running in a container anyway…
|
||||
_ = t.cmd.Process.Kill()
|
||||
}
|
||||
|
||||
@@ -3,13 +3,12 @@ package main
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/signature"
|
||||
"github.com/go-check/check"
|
||||
"github.com/containers/image/v5/signature"
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -21,7 +20,6 @@ func init() {
|
||||
}
|
||||
|
||||
type SigningSuite struct {
|
||||
gpgHome string
|
||||
fingerprint string
|
||||
}
|
||||
|
||||
@@ -40,25 +38,18 @@ func (s *SigningSuite) SetUpSuite(c *check.C) {
|
||||
_, err := exec.LookPath(skopeoBinary)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
s.gpgHome, err = ioutil.TempDir("", "skopeo-gpg")
|
||||
c.Assert(err, check.IsNil)
|
||||
os.Setenv("GNUPGHOME", s.gpgHome)
|
||||
gpgHome := c.MkDir()
|
||||
os.Setenv("GNUPGHOME", gpgHome)
|
||||
|
||||
runCommandWithInput(c, "Key-Type: RSA\nName-Real: Testing user\n%no-protection\n%commit\n", gpgBinary, "--homedir", s.gpgHome, "--batch", "--gen-key")
|
||||
runCommandWithInput(c, "Key-Type: RSA\nName-Real: Testing user\n%no-protection\n%commit\n", gpgBinary, "--homedir", gpgHome, "--batch", "--gen-key")
|
||||
|
||||
lines, err := exec.Command(gpgBinary, "--homedir", s.gpgHome, "--with-colons", "--no-permission-warning", "--fingerprint").Output()
|
||||
lines, err := exec.Command(gpgBinary, "--homedir", gpgHome, "--with-colons", "--no-permission-warning", "--fingerprint").Output()
|
||||
c.Assert(err, check.IsNil)
|
||||
s.fingerprint, err = findFingerprint(lines)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
func (s *SigningSuite) TearDownSuite(c *check.C) {
|
||||
if s.gpgHome != "" {
|
||||
err := os.RemoveAll(s.gpgHome)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
s.gpgHome = ""
|
||||
|
||||
os.Unsetenv("GNUPGHOME")
|
||||
}
|
||||
|
||||
@@ -73,7 +64,7 @@ func (s *SigningSuite) TestSignVerifySmoke(c *check.C) {
|
||||
manifestPath := "fixtures/image.manifest.json"
|
||||
dockerReference := "testing/smoketest"
|
||||
|
||||
sigOutput, err := ioutil.TempFile("", "sig")
|
||||
sigOutput, err := os.CreateTemp("", "sig")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.Remove(sigOutput.Name())
|
||||
assertSkopeoSucceeds(c, "^$", "standalone-sign", "-o", sigOutput.Name(),
|
||||
|
||||
592
integration/sync_test.go
Normal file
@@ -0,0 +1,592 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/v5/docker"
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/types"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
const (
|
||||
// A repository with a path with multiple components in it which
|
||||
// contains multiple tags, preferably with some tags pointing to
|
||||
// manifest lists, and with some tags that don't.
|
||||
pullableRepo = "k8s.gcr.io/coredns/coredns"
|
||||
// A tagged image in the repository that we can inspect and copy.
|
||||
pullableTaggedImage = "k8s.gcr.io/coredns/coredns:v1.6.6"
|
||||
// A tagged manifest list in the repository that we can inspect and copy.
|
||||
pullableTaggedManifestList = "k8s.gcr.io/coredns/coredns:v1.8.0"
|
||||
// A repository containing multiple tags, some of which are for
|
||||
// manifest lists, and which includes a "latest" tag. We specify the
|
||||
// name here without a tag.
|
||||
pullableRepoWithLatestTag = "k8s.gcr.io/pause"
|
||||
)
|
||||
|
||||
func init() {
|
||||
check.Suite(&SyncSuite{})
|
||||
}
|
||||
|
||||
type SyncSuite struct {
|
||||
cluster *openshiftCluster
|
||||
registry *testRegistryV2
|
||||
}
|
||||
|
||||
func (s *SyncSuite) SetUpSuite(c *check.C) {
|
||||
const registryAuth = false
|
||||
const registrySchema1 = false
|
||||
|
||||
if os.Getenv("SKOPEO_LOCAL_TESTS") == "1" {
|
||||
c.Log("Running tests without a container")
|
||||
fmt.Printf("NOTE: tests requires a V2 registry at url=%s, with auth=%t, schema1=%t \n", v2DockerRegistryURL, registryAuth, registrySchema1)
|
||||
return
|
||||
}
|
||||
|
||||
if os.Getenv("SKOPEO_CONTAINER_TESTS") != "1" {
|
||||
c.Skip("Not running in a container, refusing to affect user state")
|
||||
}
|
||||
|
||||
s.cluster = startOpenshiftCluster(c) // FIXME: Set up TLS for the docker registry port instead of using "--tls-verify=false" all over the place.
|
||||
|
||||
for _, stream := range []string{"unsigned", "personal", "official", "naming", "cosigned", "compression", "schema1", "schema2"} {
|
||||
isJSON := fmt.Sprintf(`{
|
||||
"kind": "ImageStream",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "%s"
|
||||
},
|
||||
"spec": {}
|
||||
}`, stream)
|
||||
runCommandWithInput(c, isJSON, "oc", "create", "-f", "-")
|
||||
}
|
||||
|
||||
// FIXME: Set up TLS for the docker registry port instead of using "--tls-verify=false" all over the place.
|
||||
s.registry = setupRegistryV2At(c, v2DockerRegistryURL, registryAuth, registrySchema1)
|
||||
|
||||
gpgHome := c.MkDir()
|
||||
os.Setenv("GNUPGHOME", gpgHome)
|
||||
|
||||
for _, key := range []string{"personal", "official"} {
|
||||
batchInput := fmt.Sprintf("Key-Type: RSA\nName-Real: Test key - %s\nName-email: %s@example.com\n%%no-protection\n%%commit\n",
|
||||
key, key)
|
||||
runCommandWithInput(c, batchInput, gpgBinary, "--batch", "--gen-key")
|
||||
|
||||
out := combinedOutputOfCommand(c, gpgBinary, "--armor", "--export", fmt.Sprintf("%s@example.com", key))
|
||||
err := os.WriteFile(filepath.Join(gpgHome, fmt.Sprintf("%s-pubkey.gpg", key)),
|
||||
[]byte(out), 0600)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TearDownSuite(c *check.C) {
|
||||
if os.Getenv("SKOPEO_LOCAL_TESTS") == "1" {
|
||||
return
|
||||
}
|
||||
|
||||
if s.registry != nil {
|
||||
s.registry.tearDown(c)
|
||||
}
|
||||
if s.cluster != nil {
|
||||
s.cluster.tearDown(c)
|
||||
}
|
||||
}
|
||||
|
||||
func assertNumberOfManifestsInSubdirs(c *check.C, dir string, expectedCount int) {
|
||||
nManifests := 0
|
||||
err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !d.IsDir() && d.Name() == "manifest.json" {
|
||||
nManifests++
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
})
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(nManifests, check.Equals, expectedCount)
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestDocker2DirTagged(c *check.C) {
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
image := pullableTaggedImage
|
||||
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
|
||||
c.Assert(err, check.IsNil)
|
||||
imagePath := imageRef.DockerReference().String()
|
||||
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
dir2 := path.Join(tmpDir, "dir2")
|
||||
|
||||
// sync docker => dir
|
||||
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "docker", "--dest", "dir", image, dir1)
|
||||
_, err = os.Stat(path.Join(dir1, imagePath, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// copy docker => dir
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://"+image, "dir:"+dir2)
|
||||
_, err = os.Stat(path.Join(dir2, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
out := combinedOutputOfCommand(c, "diff", "-urN", path.Join(dir1, imagePath), dir2)
|
||||
c.Assert(out, check.Equals, "")
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestDocker2DirTaggedAll(c *check.C) {
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
image := pullableTaggedManifestList
|
||||
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
|
||||
c.Assert(err, check.IsNil)
|
||||
imagePath := imageRef.DockerReference().String()
|
||||
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
dir2 := path.Join(tmpDir, "dir2")
|
||||
|
||||
// sync docker => dir
|
||||
assertSkopeoSucceeds(c, "", "sync", "--all", "--scoped", "--src", "docker", "--dest", "dir", image, dir1)
|
||||
_, err = os.Stat(path.Join(dir1, imagePath, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// copy docker => dir
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", "docker://"+image, "dir:"+dir2)
|
||||
_, err = os.Stat(path.Join(dir2, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
out := combinedOutputOfCommand(c, "diff", "-urN", path.Join(dir1, imagePath), dir2)
|
||||
c.Assert(out, check.Equals, "")
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestPreserveDigests(c *check.C) {
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
image := pullableTaggedManifestList
|
||||
|
||||
// copy docker => dir
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", "--preserve-digests", "docker://"+image, "dir:"+tmpDir)
|
||||
_, err := os.Stat(path.Join(tmpDir, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
assertSkopeoFails(c, ".*Instructed to preserve digests.*", "copy", "--all", "--preserve-digests", "--format=oci", "docker://"+image, "dir:"+tmpDir)
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestScoped(c *check.C) {
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
image := pullableTaggedImage
|
||||
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
|
||||
c.Assert(err, check.IsNil)
|
||||
imagePath := imageRef.DockerReference().String()
|
||||
|
||||
dir1 := c.MkDir()
|
||||
assertSkopeoSucceeds(c, "", "sync", "--src", "docker", "--dest", "dir", image, dir1)
|
||||
_, err = os.Stat(path.Join(dir1, path.Base(imagePath), "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "docker", "--dest", "dir", image, dir1)
|
||||
_, err = os.Stat(path.Join(dir1, imagePath, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestDirIsNotOverwritten(c *check.C) {
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
image := pullableRepoWithLatestTag
|
||||
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
|
||||
c.Assert(err, check.IsNil)
|
||||
imagePath := imageRef.DockerReference().String()
|
||||
|
||||
// make a copy of the image in the local registry
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://"+image, "docker://"+path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())))
|
||||
|
||||
//sync upstream image to dir, not scoped
|
||||
dir1 := c.MkDir()
|
||||
assertSkopeoSucceeds(c, "", "sync", "--src", "docker", "--dest", "dir", image, dir1)
|
||||
_, err = os.Stat(path.Join(dir1, path.Base(imagePath), "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
//sync local registry image to dir, not scoped
|
||||
assertSkopeoFails(c, ".*Refusing to overwrite destination directory.*", "sync", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())), dir1)
|
||||
|
||||
//sync local registry image to dir, scoped
|
||||
imageRef, err = docker.ParseReference(fmt.Sprintf("//%s", path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference()))))
|
||||
c.Assert(err, check.IsNil)
|
||||
imagePath = imageRef.DockerReference().String()
|
||||
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())), dir1)
|
||||
_, err = os.Stat(path.Join(dir1, imagePath, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestDocker2DirUntagged(c *check.C) {
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
image := pullableRepo
|
||||
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
|
||||
c.Assert(err, check.IsNil)
|
||||
imagePath := imageRef.DockerReference().String()
|
||||
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "docker", "--dest", "dir", image, dir1)
|
||||
|
||||
sysCtx := types.SystemContext{}
|
||||
tags, err := docker.GetRepositoryTags(context.Background(), &sysCtx, imageRef)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Check(len(tags), check.Not(check.Equals), 0)
|
||||
|
||||
nManifests, err := filepath.Glob(path.Join(dir1, path.Dir(imagePath), "*", "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(len(nManifests), check.Equals, len(tags))
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestYamlUntagged(c *check.C) {
|
||||
tmpDir := c.MkDir()
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
|
||||
image := pullableRepo
|
||||
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
|
||||
c.Assert(err, check.IsNil)
|
||||
imagePath := imageRef.DockerReference().Name()
|
||||
|
||||
sysCtx := types.SystemContext{}
|
||||
tags, err := docker.GetRepositoryTags(context.Background(), &sysCtx, imageRef)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Check(len(tags), check.Not(check.Equals), 0)
|
||||
|
||||
yamlConfig := fmt.Sprintf(`
|
||||
%s:
|
||||
images:
|
||||
%s: []
|
||||
`, reference.Domain(imageRef.DockerReference()), reference.Path(imageRef.DockerReference()))
|
||||
|
||||
// sync to the local registry
|
||||
yamlFile := path.Join(tmpDir, "registries.yaml")
|
||||
err = os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "docker", "--dest-tls-verify=false", yamlFile, v2DockerRegistryURL)
|
||||
// sync back from local registry to a folder
|
||||
os.Remove(yamlFile)
|
||||
yamlConfig = fmt.Sprintf(`
|
||||
%s:
|
||||
tls-verify: false
|
||||
images:
|
||||
%s: []
|
||||
`, v2DockerRegistryURL, imagePath)
|
||||
|
||||
err = os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
|
||||
|
||||
sysCtx = types.SystemContext{
|
||||
DockerInsecureSkipTLSVerify: types.NewOptionalBool(true),
|
||||
}
|
||||
localImageRef, err := docker.ParseReference(fmt.Sprintf("//%s/%s", v2DockerRegistryURL, imagePath))
|
||||
c.Assert(err, check.IsNil)
|
||||
localTags, err := docker.GetRepositoryTags(context.Background(), &sysCtx, localImageRef)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Check(len(localTags), check.Not(check.Equals), 0)
|
||||
c.Assert(len(localTags), check.Equals, len(tags))
|
||||
assertNumberOfManifestsInSubdirs(c, dir1, len(tags))
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestYamlRegex2Dir(c *check.C) {
|
||||
tmpDir := c.MkDir()
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
|
||||
yamlConfig := `
|
||||
k8s.gcr.io:
|
||||
images-by-tag-regex:
|
||||
pause: ^[12]\.0$ # regex string test
|
||||
`
|
||||
// the regex string above always matches only 2 images
|
||||
var nTags = 2
|
||||
c.Assert(nTags, check.Not(check.Equals), 0)
|
||||
|
||||
yamlFile := path.Join(tmpDir, "registries.yaml")
|
||||
err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
|
||||
assertNumberOfManifestsInSubdirs(c, dir1, nTags)
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestYamlDigest2Dir(c *check.C) {
|
||||
tmpDir := c.MkDir()
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
|
||||
yamlConfig := `
|
||||
k8s.gcr.io:
|
||||
images:
|
||||
pause:
|
||||
- sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610
|
||||
`
|
||||
yamlFile := path.Join(tmpDir, "registries.yaml")
|
||||
err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
|
||||
assertNumberOfManifestsInSubdirs(c, dir1, 1)
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestYaml2Dir(c *check.C) {
|
||||
tmpDir := c.MkDir()
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
|
||||
yamlConfig := `
|
||||
k8s.gcr.io:
|
||||
images:
|
||||
coredns/coredns:
|
||||
- v1.8.0
|
||||
- v1.7.1
|
||||
k8s-dns-kube-dns:
|
||||
- 1.14.12
|
||||
- 1.14.13
|
||||
pause:
|
||||
- latest
|
||||
|
||||
quay.io:
|
||||
images:
|
||||
quay/busybox:
|
||||
- latest`
|
||||
|
||||
// get the number of tags
|
||||
re := regexp.MustCompile(`^ +- +[^:/ ]+`)
|
||||
var nTags int
|
||||
for _, l := range strings.Split(yamlConfig, "\n") {
|
||||
if re.MatchString(l) {
|
||||
nTags++
|
||||
}
|
||||
}
|
||||
c.Assert(nTags, check.Not(check.Equals), 0)
|
||||
|
||||
yamlFile := path.Join(tmpDir, "registries.yaml")
|
||||
err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
|
||||
assertNumberOfManifestsInSubdirs(c, dir1, nTags)
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestYamlTLSVerify(c *check.C) {
|
||||
const localRegURL = "docker://" + v2DockerRegistryURL + "/"
|
||||
tmpDir := c.MkDir()
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
image := pullableRepoWithLatestTag
|
||||
tag := "latest"
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
// copy docker => docker
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://"+image+":"+tag, localRegURL+image+":"+tag)
|
||||
|
||||
yamlTemplate := `
|
||||
%s:
|
||||
%s
|
||||
images:
|
||||
%s:
|
||||
- %s`
|
||||
|
||||
testCfg := []struct {
|
||||
tlsVerify string
|
||||
msg string
|
||||
checker func(c *check.C, regexp string, args ...string)
|
||||
}{
|
||||
{
|
||||
tlsVerify: "tls-verify: false",
|
||||
msg: "",
|
||||
checker: assertSkopeoSucceeds,
|
||||
},
|
||||
{
|
||||
tlsVerify: "tls-verify: true",
|
||||
msg: ".*server gave HTTP response to HTTPS client.*",
|
||||
checker: assertSkopeoFails,
|
||||
},
|
||||
// no "tls-verify" line means default TLS verify must be ON
|
||||
{
|
||||
tlsVerify: "",
|
||||
msg: ".*server gave HTTP response to HTTPS client.*",
|
||||
checker: assertSkopeoFails,
|
||||
},
|
||||
}
|
||||
|
||||
for _, cfg := range testCfg {
|
||||
yamlConfig := fmt.Sprintf(yamlTemplate, v2DockerRegistryURL, cfg.tlsVerify, image, tag)
|
||||
yamlFile := path.Join(tmpDir, "registries.yaml")
|
||||
err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
cfg.checker(c, cfg.msg, "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
|
||||
os.Remove(yamlFile)
|
||||
os.RemoveAll(dir1)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestSyncManifestOutput(c *check.C) {
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
destDir1 := filepath.Join(tmpDir, "dest1")
|
||||
destDir2 := filepath.Join(tmpDir, "dest2")
|
||||
destDir3 := filepath.Join(tmpDir, "dest3")
|
||||
|
||||
//Split image:tag path from image URI for manifest comparison
|
||||
imageDir := pullableTaggedImage[strings.LastIndex(pullableTaggedImage, "/")+1:]
|
||||
|
||||
assertSkopeoSucceeds(c, "", "sync", "--format=oci", "--all", "--src", "docker", "--dest", "dir", pullableTaggedImage, destDir1)
|
||||
verifyManifestMIMEType(c, filepath.Join(destDir1, imageDir), imgspecv1.MediaTypeImageManifest)
|
||||
assertSkopeoSucceeds(c, "", "sync", "--format=v2s2", "--all", "--src", "docker", "--dest", "dir", pullableTaggedImage, destDir2)
|
||||
verifyManifestMIMEType(c, filepath.Join(destDir2, imageDir), manifest.DockerV2Schema2MediaType)
|
||||
assertSkopeoSucceeds(c, "", "sync", "--format=v2s1", "--all", "--src", "docker", "--dest", "dir", pullableTaggedImage, destDir3)
|
||||
verifyManifestMIMEType(c, filepath.Join(destDir3, imageDir), manifest.DockerV2Schema1SignedMediaType)
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestDocker2DockerTagged(c *check.C) {
|
||||
const localRegURL = "docker://" + v2DockerRegistryURL + "/"
|
||||
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
image := pullableTaggedImage
|
||||
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
|
||||
c.Assert(err, check.IsNil)
|
||||
imagePath := imageRef.DockerReference().String()
|
||||
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
dir2 := path.Join(tmpDir, "dir2")
|
||||
|
||||
// sync docker => docker
|
||||
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--dest-tls-verify=false", "--src", "docker", "--dest", "docker", image, v2DockerRegistryURL)
|
||||
|
||||
// copy docker => dir
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://"+image, "dir:"+dir1)
|
||||
_, err = os.Stat(path.Join(dir1, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// copy docker => dir
|
||||
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", localRegURL+imagePath, "dir:"+dir2)
|
||||
_, err = os.Stat(path.Join(dir2, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
out := combinedOutputOfCommand(c, "diff", "-urN", dir1, dir2)
|
||||
c.Assert(out, check.Equals, "")
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestDir2DockerTagged(c *check.C) {
|
||||
const localRegURL = "docker://" + v2DockerRegistryURL + "/"
|
||||
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
image := pullableRepoWithLatestTag
|
||||
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
err := os.Mkdir(dir1, 0755)
|
||||
c.Assert(err, check.IsNil)
|
||||
dir2 := path.Join(tmpDir, "dir2")
|
||||
err = os.Mkdir(dir2, 0755)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// create leading dirs
|
||||
err = os.MkdirAll(path.Dir(path.Join(dir1, image)), 0755)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// copy docker => dir
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://"+image, "dir:"+path.Join(dir1, image))
|
||||
_, err = os.Stat(path.Join(dir1, image, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// sync dir => docker
|
||||
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--dest-tls-verify=false", "--src", "dir", "--dest", "docker", dir1, v2DockerRegistryURL)
|
||||
|
||||
// create leading dirs
|
||||
err = os.MkdirAll(path.Dir(path.Join(dir2, image)), 0755)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// copy docker => dir
|
||||
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", localRegURL+image, "dir:"+path.Join(dir2, image))
|
||||
_, err = os.Stat(path.Join(dir2, image, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
out := combinedOutputOfCommand(c, "diff", "-urN", dir1, dir2)
|
||||
c.Assert(out, check.Equals, "")
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestFailsWithDir2Dir(c *check.C) {
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
dir2 := path.Join(tmpDir, "dir2")
|
||||
|
||||
// sync dir => dir is not allowed
|
||||
assertSkopeoFails(c, ".*sync from 'dir' to 'dir' not implemented.*", "sync", "--scoped", "--src", "dir", "--dest", "dir", dir1, dir2)
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestFailsNoSourceImages(c *check.C) {
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
assertSkopeoFails(c, ".*No images to sync found in .*",
|
||||
"sync", "--scoped", "--dest-tls-verify=false", "--src", "dir", "--dest", "docker", tmpDir, v2DockerRegistryURL)
|
||||
|
||||
assertSkopeoFails(c, ".*No images to sync found in .*",
|
||||
"sync", "--scoped", "--dest-tls-verify=false", "--src", "docker", "--dest", "docker", "hopefully_no_images_will_ever_be_called_like_this", v2DockerRegistryURL)
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestFailsWithDockerSourceNoRegistry(c *check.C) {
|
||||
const regURL = "google.com/namespace/imagename"
|
||||
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
//untagged
|
||||
assertSkopeoFails(c, ".*invalid status code from registry 404.*",
|
||||
"sync", "--scoped", "--src", "docker", "--dest", "dir", regURL, tmpDir)
|
||||
|
||||
//tagged
|
||||
assertSkopeoFails(c, ".*invalid status code from registry 404.*",
|
||||
"sync", "--scoped", "--src", "docker", "--dest", "dir", regURL+":thetag", tmpDir)
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestFailsWithDockerSourceUnauthorized(c *check.C) {
|
||||
const repo = "privateimagenamethatshouldnotbepublic"
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
//untagged
|
||||
assertSkopeoFails(c, ".*Registry disallows tag list retrieval.*",
|
||||
"sync", "--scoped", "--src", "docker", "--dest", "dir", repo, tmpDir)
|
||||
|
||||
//tagged
|
||||
assertSkopeoFails(c, ".*unauthorized: authentication required.*",
|
||||
"sync", "--scoped", "--src", "docker", "--dest", "dir", repo+":thetag", tmpDir)
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestFailsWithDockerSourceNotExisting(c *check.C) {
|
||||
repo := path.Join(v2DockerRegistryURL, "imagedoesnotexist")
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
//untagged
|
||||
assertSkopeoFails(c, ".*invalid status code from registry 404.*",
|
||||
"sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", repo, tmpDir)
|
||||
|
||||
//tagged
|
||||
assertSkopeoFails(c, ".*reading manifest.*",
|
||||
"sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", repo+":thetag", tmpDir)
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestFailsWithDirSourceNotExisting(c *check.C) {
|
||||
// Make sure the dir does not exist!
|
||||
tmpDir := c.MkDir()
|
||||
tmpDir = filepath.Join(tmpDir, "this-does-not-exist")
|
||||
err := os.RemoveAll(tmpDir)
|
||||
c.Assert(err, check.IsNil)
|
||||
_, err = os.Stat(path.Join(tmpDir))
|
||||
c.Check(os.IsNotExist(err), check.Equals, true)
|
||||
|
||||
assertSkopeoFails(c, ".*no such file or directory.*",
|
||||
"sync", "--scoped", "--dest-tls-verify=false", "--src", "dir", "--dest", "docker", tmpDir, v2DockerRegistryURL)
|
||||
}
|
||||
@@ -3,16 +3,23 @@ package main
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-check/check"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
const skopeoBinary = "skopeo"
|
||||
const decompressDirsBinary = "./decompress-dirs.sh"
|
||||
|
||||
const testFQIN = "docker://quay.io/libpod/busybox" // tag left off on purpose, some tests need to add a special one
|
||||
const testFQIN64 = "docker://quay.io/libpod/busybox:amd64"
|
||||
const testFQINMultiLayer = "docker://quay.io/libpod/alpine_nginx:master" // multi-layer
|
||||
|
||||
// consumeAndLogOutputStream takes (f, err) from an exec.*Pipe(), and causes all output from it to be logged to c.
|
||||
func consumeAndLogOutputStream(c *check.C, id string, f io.ReadCloser, err error) {
|
||||
@@ -156,15 +163,15 @@ func modifyEnviron(env []string, name, value string) []string {
|
||||
// fileFromFixture applies edits to inputPath and returns a path to the temporary file.
|
||||
// Callers should defer os.Remove(the_returned_path)
|
||||
func fileFromFixture(c *check.C, inputPath string, edits map[string]string) string {
|
||||
contents, err := ioutil.ReadFile(inputPath)
|
||||
contents, err := os.ReadFile(inputPath)
|
||||
c.Assert(err, check.IsNil)
|
||||
for template, value := range edits {
|
||||
updated := bytes.Replace(contents, []byte(template), []byte(value), -1)
|
||||
updated := bytes.ReplaceAll(contents, []byte(template), []byte(value))
|
||||
c.Assert(bytes.Equal(updated, contents), check.Equals, false, check.Commentf("Replacing %s in %#v failed", template, string(contents))) // Verify that the template has matched something and we are not silently ignoring it.
|
||||
contents = updated
|
||||
}
|
||||
|
||||
file, err := ioutil.TempFile("", "policy.json")
|
||||
file, err := os.CreateTemp("", "policy.json")
|
||||
c.Assert(err, check.IsNil)
|
||||
path := file.Name()
|
||||
|
||||
@@ -174,3 +181,35 @@ func fileFromFixture(c *check.C, inputPath string, edits map[string]string) stri
|
||||
c.Assert(err, check.IsNil)
|
||||
return path
|
||||
}
|
||||
|
||||
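A typical call site, using the @keydir@-style placeholders that appear in the fixtures earlier in this diff (the fixture path and key directory here are illustrative):

	policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{
		"@keydir@": keysDir, // substitute the template token with a real path
	})
	defer os.Remove(policy)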
// runDecompressDirs runs decompress-dirs.sh using exec.Command().CombinedOutput, verifies that the exit status is 0,
|
||||
// and optionally that the output matches a multi-line regexp if it is nonempty; or terminates c on failure
|
||||
func runDecompressDirs(c *check.C, regexp string, args ...string) {
|
||||
c.Logf("Running %s %s", decompressDirsBinary, strings.Join(args, " "))
|
||||
for i, dir := range args {
|
||||
m, err := os.ReadFile(filepath.Join(dir, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Logf("manifest %d before: %s", i+1, string(m))
|
||||
}
|
||||
out, err := exec.Command(decompressDirsBinary, args...).CombinedOutput()
|
||||
c.Assert(err, check.IsNil, check.Commentf("%s", out))
|
||||
for i, dir := range args {
|
||||
if len(out) > 0 {
|
||||
c.Logf("output: %s", out)
|
||||
}
|
||||
m, err := os.ReadFile(filepath.Join(dir, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Logf("manifest %d after: %s", i+1, string(m))
|
||||
}
|
||||
if regexp != "" {
|
||||
c.Assert(string(out), check.Matches, "(?s)"+regexp) // (?s) : '.' will also match newlines
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that the manifest of the dir: image at dir has MIME type expectedMIMEType.
|
||||
func verifyManifestMIMEType(c *check.C, dir string, expectedMIMEType string) {
|
||||
manifestBlob, err := os.ReadFile(filepath.Join(dir, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
mimeType := manifest.GuessMIMEType(manifestBlob)
|
||||
c.Assert(mimeType, check.Equals, expectedMIMEType)
|
||||
}
|
||||
|
||||
132
skopeo.spec.rpkg
Normal file
@@ -0,0 +1,132 @@
|
||||
# For automatic rebuilds in COPR
|
||||
|
||||
# The following tag is to get correct syntax highlighting for this file in the vim text editor
|
||||
# vim: syntax=spec
|
||||
|
||||
# Any additional comments should go below this line or else syntax highlighting
|
||||
# may not work.
|
||||
|
||||
# CAUTION: This is not a replacement for RPMs provided by your distro.
|
||||
# Only intended to build and test the latest unreleased changes.
|
||||
|
||||
%global gomodulesmode GO111MODULE=on
|
||||
%global with_debug 1
|
||||
|
||||
%if 0%{?with_debug}
|
||||
%global _find_debuginfo_dwz_opts %{nil}
|
||||
%global _dwz_low_mem_die_limit 0
|
||||
%else
|
||||
%global debug_package %{nil}
|
||||
%endif
|
||||
|
||||
%if ! 0%{?gobuild:1}
|
||||
%define gobuild(o:) go build -buildmode pie -compiler gc -tags="rpm_crashtraceback ${BUILDTAGS:-}" -ldflags "${LDFLAGS:-} -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n') -extldflags '-Wl,-z,relro -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld '" -a -v -x %{?**};
|
||||
%endif
|
||||
|
||||
Name: {{{ git_dir_name }}}
|
||||
Epoch: 101
|
||||
Version: {{{ git_dir_version }}}
|
||||
Release: 1%{?dist}
|
||||
Summary: Inspect container images and repositories on registries
|
||||
License: ASL 2.0
|
||||
URL: https://github.com/containers/skopeo
|
||||
VCS: {{{ git_dir_vcs }}}
|
||||
Source: {{{ git_dir_pack }}}
|
||||
%if 0%{?fedora} && ! 0%{?rhel}
|
||||
BuildRequires: btrfs-progs-devel
|
||||
%endif
|
||||
BuildRequires: golang >= 1.16.6
|
||||
BuildRequires: glib2-devel
|
||||
BuildRequires: git-core
|
||||
BuildRequires: go-md2man
|
||||
%if 0%{?fedora} || 0%{?rhel} >= 9
|
||||
BuildRequires: go-rpm-macros
|
||||
%endif
|
||||
BuildRequires: pkgconfig(devmapper)
|
||||
BuildRequires: gpgme-devel
|
||||
BuildRequires: libassuan-devel
|
||||
BuildRequires: pkgconfig
|
||||
BuildRequires: make
|
||||
BuildRequires: ostree-devel
|
||||
%if 0%{?fedora} <= 35
|
||||
Requires: containers-common >= 4:1-39
|
||||
%else
|
||||
Requires: containers-common >= 4:1-46
|
||||
%endif
|
||||
|
||||
%description
|
||||
Command line utility to inspect images and repositories directly on Docker
|
||||
registries without the need to pull them.
|
||||
|
||||
%package tests
|
||||
Summary: Tests for %{name}
|
||||
Requires: %{name} = %{epoch}:%{version}-%{release}
|
||||
Requires: bats
|
||||
Requires: gnupg
|
||||
Requires: jq
|
||||
Requires: podman
|
||||
Requires: httpd-tools
|
||||
Requires: openssl
|
||||
Requires: fakeroot
|
||||
Requires: squashfs-tools
|
||||
|
||||
%description tests
|
||||
%{summary}
|
||||
|
||||
This package contains system tests for %{name}
|
||||
|
||||
%prep
|
||||
{{{ git_dir_setup_macro }}}
|
||||
|
||||
sed -i 's/install-binary: bin\/skopeo/install-binary:/' Makefile
|
||||
|
||||
# This will invoke the `make` command in the directory with the extracted sources.
|
||||
%build
|
||||
%set_build_flags
|
||||
export CGO_CFLAGS=$CFLAGS
|
||||
# These extra flags present in $CFLAGS have been skipped for now as they break the build
|
||||
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-flto=auto//g')
|
||||
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-Wp,D_GLIBCXX_ASSERTIONS//g')
|
||||
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-specs=\/usr\/lib\/rpm\/redhat\/redhat-annobin-cc1//g')
|
||||
|
||||
%ifarch x86_64
|
||||
export CGO_CFLAGS+=" -m64 -mtune=generic -fcf-protection=full"
|
||||
%endif
|
||||
|
||||
LDFLAGS=""
|
||||
|
||||
export BUILDTAGS="$(hack/libdm_tag.sh)"
|
||||
%if 0%{?rhel}
|
||||
export BUILDTAGS="$BUILDTAGS exclude_graphdriver_btrfs btrfs_noversion"
|
||||
%endif
|
||||
|
||||
%gobuild -o bin/%{name} ./cmd/%{name}
|
||||
|
||||
%install
|
||||
%{__make} PREFIX=%{buildroot}%{_prefix} install-binary install-docs install-completions
|
||||
|
||||
# system tests
|
||||
install -d -p %{buildroot}/%{_datadir}/%{name}/test/system
|
||||
cp -pav systemtest/* %{buildroot}/%{_datadir}/%{name}/test/system/
|
||||
|
||||
%files
|
||||
%license LICENSE
|
||||
%doc README.md
|
||||
%{_bindir}/%{name}
|
||||
%{_mandir}/man1/%%{name}*
|
||||
%dir %{_datadir}/bash-completion
|
||||
%dir %{_datadir}/bash-completion/completions
|
||||
%{_datadir}/bash-completion/completions/%{name}
|
||||
%dir %{_datadir}/fish
|
||||
%dir %{_datadir}/fish/vendor_completions.d
|
||||
%{_datadir}/fish/vendor_completions.d/%{name}.fish
|
||||
%dir %{_datadir}/zsh
|
||||
%dir %{_datadir}/zsh/site-functions
|
||||
%{_datadir}/zsh/site-functions/_%{name}
|
||||
|
||||
%files tests
|
||||
%license LICENSE
|
||||
%{_datadir}/%{name}/test
|
||||
|
||||
%changelog
|
||||
{{{ git_dir_changelog }}}
|
@@ -27,11 +27,20 @@ load helpers
# Now run inspect locally
run_skopeo inspect dir:$workdir
inspect_local=$output
run_skopeo inspect --raw dir:$workdir
inspect_local_raw=$output
config_digest=$(jq -r '.config.digest' <<<"$inspect_local_raw")

# Each SHA-named file must be listed in the output of 'inspect'
# Each SHA-named layer file (but not the config) must be listed in the output of 'inspect'.
# In all existing versions of Skopeo (with 1.6 being the current as of this comment),
# the output of 'inspect' lists layer digests,
# but not the digest of the config blob ($config_digest), if any.
layers=$(jq -r '.Layers' <<<"$inspect_local")
for sha in $(find $workdir -type f | xargs -l1 basename | egrep '^[0-9a-f]{64}$'); do
expect_output --from="$inspect_local" --substring "sha256:$sha" \
"Locally-extracted SHA file is present in 'inspect'"
if [ "sha256:$sha" != "$config_digest" ]; then
expect_output --from="$layers" --substring "sha256:$sha" \
"Locally-extracted SHA file is present in 'inspect'"
fi
done

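# Illustration only (not part of the test): the split between layer digests
# and the config digest can be seen directly from the two inspect outputs
# captured above; the variable names are the ones used in this test.
#   jq -r '.config.digest' <<<"$inspect_local_raw"   # config blob digest (raw manifest)
#   jq -r '.Layers[]'      <<<"$inspect_local"       # layer digests only
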
# Simple sanity check on 'inspect' output.
@@ -64,4 +73,59 @@ Os linux
END_EXPECT
}

@test "inspect: env" {
remote_image=docker://quay.io/libpod/fedora:31
run_skopeo inspect $remote_image
inspect_remote=$output

# Simple check on 'inspect' output with environment variables.
# 1) Get remote image values of environment variables (the value of 'Env')
# 2) Confirm substring in check_array and the value of 'Env' match.
check_array=(FGC=f31 DISTTAG=f31container)
remote=$(jq '.Env[]' <<<"$inspect_remote")
for substr in ${check_array[@]}; do
expect_output --from="$remote" --substring "$substr"
done
}

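# Illustration only: in skopeo versions that support --format (used elsewhere
# in these tests), the same Env values can be pulled without jq; the image
# reference is reused from the test above.
#   skopeo inspect --format '{{.Env}}' docker://quay.io/libpod/fedora:31
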
# Tests https://github.com/containers/skopeo/pull/708
@test "inspect: image manifest list w/ diff platform" {
# This image's manifest is for an os + arch that is... um, unlikely
# to support skopeo in the foreseeable future. Or past. The image
# is created by the make-noarch-manifest script in this directory.
img=docker://quay.io/libpod/notmyarch:20210121

# Get our host arch (what we're running on). This assumes that skopeo
# arch matches podman; it also assumes running podman >= April 2020
# (prior to that, the format keys were lower-case).
arch=$(podman info --format '{{.Host.Arch}}')

# By default, 'inspect' tries to match our host os+arch. This should fail.
run_skopeo 1 inspect $img
expect_output --substring "parsing manifest for image: choosing image instance: no image found in manifest list for architecture $arch, variant " \
"skopeo inspect, without --raw, fails"

# With --raw, we can inspect
run_skopeo inspect --raw $img
expect_output --substring "manifests.*platform.*architecture" \
"skopeo inspect --raw returns reasonable output"

# ...and what we get should be consistent with what our script created.
archinfo=$(jq -r '.manifests[0].platform | {os,variant,architecture} | join("-")' <<<"$output")

expect_output --from="$archinfo" "amigaos-1000-mc68000" \
"os - variant - architecture of $img"
}

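# Illustration only: every platform in a manifest list can be listed via
# --raw plus jq (same image as the test above):
#   skopeo inspect --raw docker://quay.io/libpod/notmyarch:20210121 \
#       | jq -r '.manifests[].platform | "\(.os)/\(.architecture)"'
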
@test "inspect: don't list tags" {
remote_image=docker://quay.io/fedora/fedora
# use --no-tags to not list any tags
run_skopeo inspect --no-tags $remote_image
inspect_output=$output
# extract the content of "RepoTags" property from the JSON output
repo_tags=$(jq '.RepoTags[]' <<<"$inspect_output")
# verify that the RepoTags was empty
expect_output --from="$repo_tags" "" "inspect --no-tags was expected to return empty RepoTags[]"
}

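# Illustration only: comparing tag counts with and without --no-tags
# (same image reference as the test above):
#   skopeo inspect           docker://quay.io/fedora/fedora | jq '.RepoTags | length'
#   skopeo inspect --no-tags docker://quay.io/fedora/fedora | jq '.RepoTags | length'   # expected: 0
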
# vim: filetype=sh

@@ -14,7 +14,7 @@ function setup() {
# From remote, to dir1, to local, to dir2;
# compare dir1 and dir2, expect no changes
@test "copy: dir, round trip" {
local remote_image=docker://busybox:latest
local remote_image=docker://quay.io/libpod/busybox:latest
local localimg=docker://localhost:5000/busybox:unsigned

local dir1=$TESTDIR/dir1
@@ -30,7 +30,7 @@ function setup() {

# Same as above, but using 'oci:' instead of 'dir:' and with a :latest tag
@test "copy: oci, round trip" {
local remote_image=docker://busybox:latest
local remote_image=docker://quay.io/libpod/busybox:latest
local localimg=docker://localhost:5000/busybox:unsigned

local dir1=$TESTDIR/oci1
@@ -44,9 +44,30 @@ function setup() {
diff -urN $dir1 $dir2
}

# Compression zstd
@test "copy: oci, zstd" {
local remote_image=docker://quay.io/libpod/busybox:latest

local dir=$TESTDIR/dir

run_skopeo copy --dest-compress --dest-compress-format=zstd $remote_image oci:$dir:latest

# zstd magic number
local magic=$(printf "\x28\xb5\x2f\xfd")

# Check there is at least one file that has the zstd magic number as the first 4 bytes
(for i in $dir/blobs/sha256/*; do test "$(head -c 4 $i)" = $magic && exit 0; done; exit 1)

# Check that the manifest's description of the image's first layer is the zstd layer type
instance=$(jq -r '.manifests[0].digest' $dir/index.json)
[[ "$instance" != null ]]
mediatype=$(jq -r '.layers[0].mediaType' < $dir/blobs/${instance/://})
[[ "$mediatype" == "application/vnd.oci.image.layer.v1.tar+zstd" ]]
}

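# Illustration only: one way to eyeball the zstd frame magic (28 b5 2f fd) on
# the blobs written by the test above ($dir as set in that test):
#   for f in $dir/blobs/sha256/*; do head -c 4 "$f" | od -An -tx1; done
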
# Same image, extracted once with :tag and once without
@test "copy: oci w/ and w/o tags" {
local remote_image=docker://busybox:latest
local remote_image=docker://quay.io/libpod/busybox:latest

local dir1=$TESTDIR/dir1
local dir2=$TESTDIR/dir2
@@ -61,6 +82,15 @@ function setup() {
grep '"org.opencontainers.image.ref.name":"withtag"' $dir2/index.json
}

# Registry -> storage -> oci-archive
@test "copy: registry -> storage -> oci-archive" {
local alpine=quay.io/libpod/alpine:latest
local tmp=$TESTDIR/oci

run_skopeo copy docker://$alpine containers-storage:$alpine
run_skopeo copy containers-storage:$alpine oci-archive:$tmp
}

# This one seems unlikely to get fixed
@test "copy: bug 651" {
skip "Enable this once skopeo issue #651 has been fixed"
@@ -70,6 +100,64 @@ function setup() {
docker://localhost:5000/foo
}

# manifest format
@test "copy: manifest format" {
local remote_image=docker://quay.io/libpod/busybox:latest

local dir1=$TESTDIR/dir1
local dir2=$TESTDIR/dir2

run_skopeo copy --format v2s2 $remote_image dir:$dir1
run_skopeo copy --format oci $remote_image dir:$dir2
grep 'application/vnd.docker.distribution.manifest.v2' $dir1/manifest.json
grep 'application/vnd.oci.image' $dir2/manifest.json
}

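# Illustration only: the generated manifests can also be checked by media type
# with jq (paths reuse $dir1/$dir2 from the test above; note that an OCI image
# manifest may omit the optional mediaType field in some skopeo versions):
#   jq -r '.mediaType' $dir1/manifest.json   # expect ...docker.distribution.manifest.v2+json
#   jq -r '.mediaType' $dir2/manifest.json   # expect ...oci.image.manifest.v1+json (if present)
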
# additional tag
@test "copy: additional tag" {
local remote_image=docker://quay.io/libpod/busybox:latest

# additional-tag is supported only for docker-archive
run_skopeo copy --additional-tag busybox:mine $remote_image \
docker-archive:$TESTDIR/mybusybox.tar:busybox:latest
mkdir -p $TESTDIR/podmanroot
run podman --root $TESTDIR/podmanroot load -i $TESTDIR/mybusybox.tar
run podman --root $TESTDIR/podmanroot images
expect_output --substring "mine"

# rootless cleanup needs to be done with unshare due to subuids
if [[ "$(id -u)" != "0" ]]; then
run podman unshare rm -rf $TESTDIR/podmanroot
fi
}

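# Illustration only (paths here are hypothetical): the docker-archive
# reference used above has the form docker-archive:<path>.tar:<name>:<tag>,
# and --additional-tag records an extra name:tag inside the same archive, e.g.
#   skopeo copy --additional-tag busybox:mine docker://quay.io/libpod/busybox:latest \
#       docker-archive:/tmp/mybusybox.tar:busybox:latest
#   podman load -i /tmp/mybusybox.tar   # image then shows both :latest and :mine
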
# shared blob directory
@test "copy: shared blob directory" {
local remote_image=docker://quay.io/libpod/busybox:latest

local shareddir=$TESTDIR/shareddir
local dir1=$TESTDIR/dir1
local dir2=$TESTDIR/dir2

run_skopeo copy --dest-shared-blob-dir $shareddir \
$remote_image oci:$dir1
[ -n "$(ls $shareddir)" ]
[ -z "$(ls $dir1/blobs)" ]
run_skopeo copy --src-shared-blob-dir $shareddir \
oci:$dir1 oci:$dir2
diff -urN $shareddir $dir2/blobs
}

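# Illustration only: with --dest-shared-blob-dir the blobs land in the shared
# directory instead of the layout's own blobs/ tree, which is why the test
# checks that $shareddir is non-empty while $dir1/blobs stays empty:
#   ls $shareddir       # populated (blob store shared across OCI layouts)
#   ls $dir1/blobs      # empty for this layout
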
@test "copy: sif image" {
type -path fakeroot || skip "'fakeroot' tool not available"

local localimg=dir:$TESTDIR/dir

run_skopeo copy sif:${TEST_SOURCE_DIR}/testdata/busybox_latest.sif $localimg
run_skopeo inspect $localimg --format "{{.Architecture}}"
expect_output "amd64"
}

teardown() {
podman rm -f reg


@@ -8,19 +8,28 @@ load helpers
function setup() {
standard_setup

start_registry --with-cert reg
start_registry --with-cert --enable-delete=true reg
}

@test "local registry, with cert" {
# Push to local registry...
run_skopeo copy --dest-cert-dir=$TESTDIR/client-auth \
docker://busybox:latest \
docker://quay.io/libpod/busybox:latest \
docker://localhost:5000/busybox:unsigned

# ...and pull it back out
run_skopeo copy --src-cert-dir=$TESTDIR/client-auth \
docker://localhost:5000/busybox:unsigned \
dir:$TESTDIR/extracted

# inspect with cert
run_skopeo inspect --cert-dir=$TESTDIR/client-auth \
docker://localhost:5000/busybox:unsigned
expect_output --substring "localhost:5000/busybox"

# delete with cert
run_skopeo delete --cert-dir=$TESTDIR/client-auth \
docker://localhost:5000/busybox:unsigned
}

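# Illustration only (file names below are an assumption about what the helper
# generates, not shown in this diff): the directory passed via --cert-dir /
# --src-cert-dir / --dest-cert-dir holds TLS material in the usual containers
# layout, e.g.
#   $TESTDIR/client-auth/ca.crt        # CA certificate trusted for the registry
#   $TESTDIR/client-auth/client.cert   # client certificate (paired by basename)
#   $TESTDIR/client-auth/client.key    # client key
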
teardown() {