Mirror of https://github.com/containers/skopeo.git (synced 2026-01-30 13:58:48 +00:00)
Compare commits
281 Commits
.cirrus.yml (82 changed lines)

@@ -23,20 +23,14 @@ env:
     ####
     #### Cache-image names to test with (double-quotes around names are critical)
     ####
-    FEDORA_NAME: "fedora-35"
-    PRIOR_FEDORA_NAME: "fedora-34"
-    UBUNTU_NAME: "ubuntu-2110"
+    FEDORA_NAME: "fedora-36"

     # Google-cloud VM Images
-    IMAGE_SUFFIX: "c6226133906620416"
+    IMAGE_SUFFIX: "c5495735033528320"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
-    PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
-    UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"

     # Container FQIN's
     FEDORA_CONTAINER_FQIN: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}"
-    PRIOR_FEDORA_CONTAINER_FQIN: "quay.io/libpod/prior-fedora_podman:${IMAGE_SUFFIX}"
-    UBUNTU_CONTAINER_FQIN: "quay.io/libpod/ubuntu_podman:${IMAGE_SUFFIX}"

     # Built along with the standard PR-based workflow in c/automation_images
     SKOPEO_CIDEV_CONTAINER_FQIN: "quay.io/libpod/skopeo_cidev:${IMAGE_SUFFIX}"

@@ -53,7 +47,7 @@ validate_task:
     # The git-validation tool doesn't work well on branch or tag push,
     # under Cirrus-CI, due to challenges obtaining the starting commit ID.
     # Only do validation for PRs.
-    only_if: $CIRRUS_PR != ''
+    only_if: &is_pr $CIRRUS_PR != ''
     container:
         image: '${SKOPEO_CIDEV_CONTAINER_FQIN}'
         cpu: 4

@@ -63,7 +57,7 @@ validate_task:
         make vendor && hack/tree_status.sh

 doccheck_task:
-    only_if: $CIRRUS_PR != ''
+    only_if: *is_pr
     depends_on:
         - validate
     container:

@@ -81,7 +75,10 @@ doccheck_task:
         "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" doccheck

 osx_task:
-    only_if: &not_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
+    # Run for regular PRs and those with [CI:BUILD] but not [CI:DOCS]
+    only_if: &not_docs_multiarch >-
+        $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
+        $CIRRUS_CRON != 'multiarch'
     depends_on:
         - validate
     macos_instance:

@@ -90,7 +87,7 @@ osx_task:
         export PATH=$GOPATH/bin:$PATH
         brew update
         brew install gpgme go go-md2man
-        go get -u golang.org/x/lint/golint
+        go install golang.org/x/lint/golint@latest
     test_script: |
         export PATH=$GOPATH/bin:$PATH
         go version

@@ -102,10 +99,10 @@ osx_task:

 cross_task:
     alias: cross
-    only_if: *not_docs
+    only_if: *not_docs_multiarch
     depends_on:
         - validate
-    gce_instance:
+    gce_instance: &standardvm
         image_project: libpod-218412
         zone: "us-central1-f"
         cpu: 2

@@ -129,7 +126,11 @@ cross_task:
 #####
 test_skopeo_task:
     alias: test_skopeo
-    only_if: *not_docs
+    # Don't test for [CI:DOCS], [CI:BUILD], or 'multiarch' cron.
+    only_if: >-
+        $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' &&
+        $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
+        $CIRRUS_CRON != 'multiarch'
     depends_on:
         - validate
     gce_instance:

@@ -162,6 +163,49 @@ test_skopeo_task:
         "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" system


+image_build_task: &image-build
+    name: "Build multi-arch $CTXDIR"
+    alias: image_build
+    # Some of these container images take > 1h to build, limit
+    # this task to a specific Cirrus-Cron entry with this name.
+    only_if: $CIRRUS_CRON == 'multiarch'
+    timeout_in: 120m  # emulation is sssllllooooowwww
+    gce_instance:
+        <<: *standardvm
+        image_name: build-push-${IMAGE_SUFFIX}
+        # More muscle required for parallel multi-arch build
+        type: "n2-standard-4"
+    matrix:
+        - env:
+            CTXDIR: contrib/skopeoimage/upstream
+        - env:
+            CTXDIR: contrib/skopeoimage/testing
+        - env:
+            CTXDIR: contrib/skopeoimage/stable
+    env:
+        SKOPEO_USERNAME: ENCRYPTED[4195884d23b154553f2ddb26a63fc9fbca50ba77b3e447e4da685d8639ed9bc94b9a86a9c77272c8c80d32ead9ca48da]
+        SKOPEO_PASSWORD: ENCRYPTED[36e06f9befd17e5da2d60260edb9ef0d40e6312e2bba4cf881d383f1b8b5a18c8e5a553aea2fdebf39cebc6bd3b3f9de]
+        CONTAINERS_USERNAME: ENCRYPTED[dd722c734641f103b394a3a834d51ca5415347e378637cf98ee1f99e64aad2ec3dbd4664c0d94cb0e06b83d89e9bbe91]
+        CONTAINERS_PASSWORD: ENCRYPTED[d8b0fac87fe251cedd26c864ba800480f9e0570440b9eb264265b67411b253a626fb69d519e188e6c9a7f525860ddb26]
+    main_script:
+        - source /etc/automation_environment
+        - main.sh $CIRRUS_REPO_CLONE_URL $CTXDIR
+
+
+test_image_build_task:
+    <<: *image-build
+    alias: test_image_build
+    # Allow this to run inside a PR w/ [CI:BUILD] only.
+    only_if: $CIRRUS_PR != '' && $CIRRUS_CHANGE_TITLE =~ '.*CI:BUILD.*'
+    # This takes a LONG time, only run when requested.  N/B: Any task
+    # made to depend on this one will block FOREVER unless triggered.
+    # DO NOT ADD THIS TASK AS DEPENDENCY FOR `success_task`.
+    trigger_type: manual
+    # Overwrite all 'env', don't push anything, just do the build.
+    env:
+        DRYRUN: 1
+
+
 # This task is critical. It updates the "last-used by" timestamp stored
 # in metadata for all VM images. This mechanism functions in tandem with
 # an out-of-band pruning operation to remove disused VM images.

@@ -171,13 +215,12 @@ meta_task:
     container: &smallcontainer
         cpu: 2
         memory: 2
-        image: quay.io/libpod/imgts:$IMAGE_SUFFIX
+        image: quay.io/libpod/imgts:latest
     env:
         # Space-separated list of images used by this repository state
-        IMGNAMES: >-
+        IMGNAMES: |
             ${FEDORA_CACHE_IMAGE_NAME}
-            ${PRIOR_FEDORA_CACHE_IMAGE_NAME}
-            ${UBUNTU_CACHE_IMAGE_NAME}
+            build-push-${IMAGE_SUFFIX}
         BUILDID: "${CIRRUS_BUILD_ID}"
         REPOREF: "${CIRRUS_REPO_NAME}"
         GCPJSON: ENCRYPTED[6867b5a83e960e7c159a98fe6c8360064567a071c6f4b5e7d532283ecd870aa65c94ccd74bdaa9bf7aadac9d42e20a67]

@@ -200,6 +243,7 @@ success_task:
         - osx
         - cross
         - test_skopeo
+        - image_build
         - meta
     container: *smallcontainer
     env:
.github/workflows/check_cirrus_cron.yml (vendored, 100 changed lines)

@@ -1,93 +1,17 @@
 ---

 # See also:
 # https://github.com/containers/podman/blob/main/.github/workflows/check_cirrus_cron.yml

-# Format Ref: https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions
-
-# Required to un-FUBAR default ${{github.workflow}} value
-name: check_cirrus_cron
-
 on:
-  # Note: This only applies to the default branch.
-  schedule:
-    # N/B: This should correspond to a period slightly after
-    # the last job finishes running.  See job defs. at:
-    # https://cirrus-ci.com/settings/repository/6706677464432640
-    - cron: '59 23 * * 1-5'
-  # Debug: Allow triggering job manually in github-actions WebUI
-  workflow_dispatch: {}
-
-env:
-  # Debug-mode can reveal secrets, only enable by a secret value.
-  # Ref: https://help.github.com/en/actions/configuring-and-managing-workflows/managing-a-workflow-run#enabling-step-debug-logging
-  ACTIONS_STEP_DEBUG: '${{ secrets.ACTIONS_STEP_DEBUG }}'
-  # Use same destination addresses from podman repository
-  FAILMAILCSV: './_podman/contrib/cirrus/cron-fail_addrs.csv'
-  # Filename for table of cron-name to build-id data
-  # (must be in $GITHUB_WORKSPACE/artifacts/)
-  NAME_ID_FILEPATH: './artifacts/name_id.txt'
+  # Note: This only applies to the default branch.
+  schedule:
+    # N/B: This should correspond to a period slightly after
+    # the last job finishes running.  See job defs. at:
+    # https://cirrus-ci.com/settings/repository/6706677464432640
+    - cron: '59 23 * * 1-5'
+  # Debug: Allow triggering job manually in github-actions WebUI
+  workflow_dispatch: {}

 jobs:
-  cron_failures:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-        with:
-          persist-credentials: false
-
-      # Avoid duplicating cron_failures.sh in skopeo repo.
-      - uses: actions/checkout@v2
-        with:
-          repository: containers/podman
-          path: '_podman'
-          persist-credentials: false
-
-      - name: Get failed cron names and Build IDs
-        id: cron
-        run: './_podman/.github/actions/${{ github.workflow }}/${{ github.job }}.sh'
-
-      - if: steps.cron.outputs.failures > 0
-        shell: bash
-        # Must be inline, since context expressions are used.
-        # Ref: https://docs.github.com/en/free-pro-team@latest/actions/reference/context-and-expression-syntax-for-github-actions
-        run: |
-          set -eo pipefail
-          (
-            echo "Detected one or more Cirrus-CI cron-triggered jobs have failed recently:"
-            echo ""
-
-            while read -r NAME BID; do
-              echo "Cron build '$NAME' Failed: https://cirrus-ci.com/build/$BID"
-            done < "$NAME_ID_FILEPATH"
-
-            echo ""
-            echo "# Source: ${{ github.workflow }} workflow on ${{ github.repository }}."
-            # Separate content from sendgrid.com automatic footer.
-            echo ""
-            echo ""
-          ) > ./artifacts/email_body.txt
-
-      - if: steps.cron.outputs.failures > 0
-        id: mailto
-        run: printf "::set-output name=csv::%s\n" $(cat "$FAILMAILCSV")
-
-      - if: steps.mailto.outputs.csv != ''
-        name: Send failure notification e-mail
-        # Ref: https://github.com/dawidd6/action-send-mail
-        uses: dawidd6/action-send-mail@v2.2.2
-        with:
-          server_address: ${{secrets.ACTION_MAIL_SERVER}}
-          server_port: 465
-          username: ${{secrets.ACTION_MAIL_USERNAME}}
-          password: ${{secrets.ACTION_MAIL_PASSWORD}}
-          subject: Cirrus-CI cron build failures on ${{github.repository}}
-          to: ${{steps.mailto.outputs.csv}}
-          from: ${{secrets.ACTION_MAIL_SENDER}}
-          body: file://./artifacts/email_body.txt
-
-      - if: always()
-        uses: actions/upload-artifact@v2
-        with:
-          name: ${{ github.job }}_artifacts
-          path: artifacts/*
+  # Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
+  call_cron_failures:
+    uses: containers/buildah/.github/workflows/check_cirrus_cron.yml@main
+    secrets: inherit
.github/workflows/multi-arch-build.yaml (vendored, 209 changed lines)

@@ -1,209 +0,0 @@
----
-
-# Please see contrib/<reponame>image/README.md for details on the intentions
-# of this workflow.
-#
-# BIG FAT WARNING:  This workflow is duplicated across containers/skopeo,
-#                   containers/buildah, and containers/podman.  ANY AND
-#                   ALL CHANGES MADE HERE MUST BE MANUALLY DUPLICATED
-#                   TO THE OTHER REPOS.
-
-name: build multi-arch images
-
-on:
-  # Upstream tends to be very active, with many merges per day.
-  # Only run this daily via cron schedule, or manually, not by branch push.
-  schedule:
-    - cron: '0 8 * * *'
-  # allows to run this workflow manually from the Actions tab
-  workflow_dispatch:
-
-jobs:
-  multi:
-    name: multi-arch image build
-    env:
-      REPONAME: skopeo  # No easy way to parse this out of $GITHUB_REPOSITORY
-      # Server/namespace value used to format FQIN
-      REPONAME_QUAY_REGISTRY: quay.io/skopeo
-      CONTAINERS_QUAY_REGISTRY: quay.io/containers
-      # list of architectures for build
-      PLATFORMS: linux/amd64,linux/s390x,linux/ppc64le,linux/arm64
-      # Command to execute in container to obtain project version number
-      VERSION_CMD: "--version"  # skopeo is the entrypoint
-
-    # build several images (upstream, testing, stable) in parallel
-    strategy:
-      # By default, failure of one matrix item cancels all others
-      fail-fast: false
-      matrix:
-        # Builds are located under contrib/<reponame>image/<source> directory
-        source:
-          - upstream
-          - testing
-          - stable
-    runs-on: ubuntu-latest
-    # internal registry caches build for inspection before push
-    services:
-      registry:
-        image: quay.io/libpod/registry:2
-        ports:
-          - 5000:5000
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-        with:
-          driver-opts: network=host
-          install: true
-
-      - name: Build and locally push image
-        uses: docker/build-push-action@v2
-        with:
-          context: contrib/${{ env.REPONAME }}image/${{ matrix.source }}
-          file: ./contrib/${{ env.REPONAME }}image/${{ matrix.source }}/Dockerfile
-          platforms: ${{ env.PLATFORMS }}
-          push: true
-          tags: localhost:5000/${{ env.REPONAME }}/${{ matrix.source }}
-
-      # Simple verification that stable images work, and
-      # also grab version number use in forming the FQIN.
-      - name: amd64 container sniff test
-        if: matrix.source == 'stable'
-        id: sniff_test
-        run: |
-          podman pull --tls-verify=false \
-                      localhost:5000/$REPONAME/${{ matrix.source }}
-          VERSION_OUTPUT=$(podman run \
-                           localhost:5000/$REPONAME/${{ matrix.source }} \
-                           $VERSION_CMD)
-          echo "$VERSION_OUTPUT"
-          VERSION=$(awk -r -e "/^${REPONAME} version /"'{print $3}' <<<"$VERSION_OUTPUT")
-          test -n "$VERSION"
-          echo "::set-output name=version::$VERSION"
-
-      - name: Generate image FQIN(s) to push
-        id: reponame_reg
-        run: |
-          if [[ "${{ matrix.source }}" == 'stable' ]]; then
-            # The command version in image just built
-            VERSION='v${{ steps.sniff_test.outputs.version }}'
-            # workaround vim syntax-highlight bug: '
-            # Push both new|updated version-tag and latest-tag FQINs
-            FQIN="$REPONAME_QUAY_REGISTRY/stable:$VERSION,$REPONAME_QUAY_REGISTRY/stable:latest"
-          elif [[ "${{ matrix.source }}" == 'testing' ]]; then
-            # Assume some contents changed, always push latest testing.
-            FQIN="$REPONAME_QUAY_REGISTRY/testing:latest"
-          elif [[ "${{ matrix.source }}" == 'upstream' ]]; then
-            # Assume some contents changed, always push latest upstream.
-            FQIN="$REPONAME_QUAY_REGISTRY/upstream:latest"
-          else
-            echo "::error::Unknown matrix item '${{ matrix.source }}'"
-            exit 1
-          fi
-          echo "::warning::Pushing $FQIN"
-          echo "::set-output name=fqin::${FQIN}"
-          echo '::set-output name=push::true'
-
-      # This is substantially similar to the above logic,
-      # but only handles $CONTAINERS_QUAY_REGISTRY for
-      # the stable "latest" and named-version tagged images.
-      - name: Generate containers reg. image FQIN(s)
-        if: matrix.source == 'stable'
-        id: containers_reg
-        run: |
-          VERSION='v${{ steps.sniff_test.outputs.version }}'
-          # workaround vim syntax-highlight bug: '
-          # Push both new|updated version-tag and latest-tag FQINs
-          FQIN="$CONTAINERS_QUAY_REGISTRY/$REPONAME:$VERSION,$CONTAINERS_QUAY_REGISTRY/$REPONAME:latest"
-          echo "::warning::Pushing $FQIN"
-          echo "::set-output name=fqin::${FQIN}"
-          echo '::set-output name=push::true'
-
-      - name: Define LABELS multi-line env. var. value
-        run: |
-          # This is a really hacky/strange workflow idiom, required
-          # for setting multi-line $LABELS value for consumption in
-          # a future step.  There is literally no cleaner way to do this :<
-          # https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#multiline-strings
-          function set_labels() {
-              echo 'LABELS<<DELIMITER' >> "$GITHUB_ENV"
-              for line; do
-                  echo "$line" | tee -a "$GITHUB_ENV"
-              done
-              echo "DELIMITER" >> "$GITHUB_ENV"
-          }
-
-          declare -a lines
-          lines=(\
-              "org.opencontainers.image.source=https://github.com/${GITHUB_REPOSITORY}.git"
-              "org.opencontainers.image.revision=${GITHUB_SHA}"
-              "org.opencontainers.image.created=$(date -u --iso-8601=seconds)"
-          )
-
-          # Only the 'stable' matrix source obtains $VERSION
-          if [[ "${{ matrix.source }}" == "stable" ]]; then
-              lines+=(\
-                  "org.opencontainers.image.version=${{ steps.sniff_test.outputs.version }}"
-              )
-          fi
-
-          set_labels "${lines[@]}"
-
-      # Separate steps to login and push for $REPONAME_QUAY_REGISTRY and
-      # $CONTAINERS_QUAY_REGISTRY are required, because 2 sets of credentials
-      # are used and namespaced within the registry.  At the same time, reuse
-      # of non-shell steps is not supported by Github Actions nor are YAML
-      # anchors/aliases, nor composite actions.
-
-      # Push to $REPONAME_QUAY_REGISTRY for stable, testing. and upstream
-      - name: Login to ${{ env.REPONAME_QUAY_REGISTRY }}
-        uses: docker/login-action@v1
-        if: steps.reponame_reg.outputs.push == 'true'
-        with:
-          registry: ${{ env.REPONAME_QUAY_REGISTRY }}
-          # N/B: Secrets are not passed to workflows that are triggered
-          #      by a pull request from a fork
-          username: ${{ secrets.REPONAME_QUAY_USERNAME }}
-          password: ${{ secrets.REPONAME_QUAY_PASSWORD }}
-
-      - name: Push images to ${{ steps.reponame_reg.outputs.fqin }}
-        uses: docker/build-push-action@v2
-        if: steps.reponame_reg.outputs.push == 'true'
-        with:
-          cache-from: type=registry,ref=localhost:5000/${{ env.REPONAME }}/${{ matrix.source }}
-          cache-to: type=inline
-          context: contrib/${{ env.REPONAME }}image/${{ matrix.source }}
-          file: ./contrib/${{ env.REPONAME }}image/${{ matrix.source }}/Dockerfile
-          platforms: ${{ env.PLATFORMS }}
-          push: true
-          tags: ${{ steps.reponame_reg.outputs.fqin }}
-          labels: |
-            ${{ env.LABELS }}
-
-      # Push to $CONTAINERS_QUAY_REGISTRY only stable
-      - name: Login to ${{ env.CONTAINERS_QUAY_REGISTRY }}
-        if: steps.containers_reg.outputs.push == 'true'
-        uses: docker/login-action@v1
-        with:
-          registry: ${{ env.CONTAINERS_QUAY_REGISTRY}}
-          username: ${{ secrets.CONTAINERS_QUAY_USERNAME }}
-          password: ${{ secrets.CONTAINERS_QUAY_PASSWORD }}
-
-      - name: Push images to ${{ steps.containers_reg.outputs.fqin }}
-        if: steps.containers_reg.outputs.push == 'true'
-        uses: docker/build-push-action@v2
-        with:
-          cache-from: type=registry,ref=localhost:5000/${{ env.REPONAME }}/${{ matrix.source }}
-          cache-to: type=inline
-          context: contrib/${{ env.REPONAME }}image/${{ matrix.source }}
-          file: ./contrib/${{ env.REPONAME }}image/${{ matrix.source }}/Dockerfile
-          platforms: ${{ env.PLATFORMS }}
-          push: true
-          tags: ${{ steps.containers_reg.outputs.fqin }}
-          labels: |
-            ${{ env.LABELS }}
.github/workflows/stale.yml (vendored, 10 changed lines)

@@ -7,13 +7,17 @@ on:
   schedule:
     - cron: "0 0 * * *"

+permissions:
+  contents: read
+
 jobs:
   stale:
+    permissions:
+      issues: write  # for actions/stale to close stale issues
+      pull-requests: write  # for actions/stale to close stale PRs
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/stale@v1
+      - uses: actions/stale@98ed4cb500039dbcccf4bd9bedada4d0187f2757 # v3
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           stale-issue-message: 'A friendly reminder that this issue had no activity for 30 days.'
.gitignore (vendored, 2 changed lines)

@@ -2,7 +2,7 @@
 /layers-*
 /skopeo
 result
-
+/completions/
 # ignore JetBrains IDEs (GoLand) config folder
 .idea
CONTRIBUTING.md

@@ -149,7 +149,7 @@ When new PRs for [containers/image](https://github.com/containers/image) break `
 ## Communications

 For general questions, or discussions, please use the
-IRC group on `irc.freenode.net` called `container-projects`
+IRC channel on `irc.libera.chat` called `#container-projects`
 that has been setup.

 For discussions around issues/bugs and features, you can use the github
Makefile (73 changed lines)

@@ -2,23 +2,22 @@
 export GOPROXY=https://proxy.golang.org

-# On some platforms (eg. macOS, FreeBSD) gpgme is installed in /usr/local/ but /usr/local/include/ is
-# not in the default search path. Rather than hard-code this directory, use gpgme-config.
-# Sadly that must be done at the top-level user instead of locally in the gpgme subpackage, because cgo
-# supports only pkg-config, not general shell scripts, and gpgme does not install a pkg-config file.
-# If gpgme is not installed or gpgme-config can’t be found for other reasons, the error is silently ignored
-# (and the user will probably find out because the cgo compilation will fail).
-GPGME_ENV := CGO_CFLAGS="$(shell gpgme-config --cflags 2>/dev/null)" CGO_LDFLAGS="$(shell gpgme-config --libs 2>/dev/null)"
-
 # The following variables very roughly follow https://www.gnu.org/prep/standards/standards.html#Makefile-Conventions .
 DESTDIR ?=
 PREFIX ?= /usr/local
+ifeq ($(shell uname -s),FreeBSD)
+CONTAINERSCONFDIR ?= /usr/local/etc/containers
+else
 CONTAINERSCONFDIR ?= /etc/containers
+endif
 REGISTRIESDDIR ?= ${CONTAINERSCONFDIR}/registries.d
-SIGSTOREDIR ?= /var/lib/containers/sigstore
+LOOKASIDEDIR ?= /var/lib/containers/sigstore
 BINDIR ?= ${PREFIX}/bin
 MANDIR ?= ${PREFIX}/share/man
-BASHCOMPLETIONSDIR ?= ${PREFIX}/share/bash-completion/completions
+
+BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
+ZSHINSTALLDIR=${PREFIX}/share/zsh/site-functions
+FISHINSTALLDIR=${PREFIX}/share/fish/vendor_completions.d

 GO ?= go
 GOBIN := $(shell $(GO) env GOBIN)

@@ -29,10 +28,15 @@ ifeq ($(GOBIN),)
 GOBIN := $(GOPATH)/bin
 endif

-# Multiple scripts are sensitive to this value, make sure it's exported/available
-# N/B: Need to use 'command -v' here for compatibility with MacOS.
-export CONTAINER_RUNTIME ?= $(if $(shell command -v podman),podman,docker)
-GOMD2MAN ?= $(if $(shell command -v go-md2man),go-md2man,$(GOBIN)/go-md2man)
+# Scripts may also use CONTAINER_RUNTIME, so we need to export it.
+# Note possibly non-obvious aspects of this:
+# - We need to use 'command -v' here, not 'which', for compatibility with MacOS.
+# - GNU Make 4.2.1 (included in Ubuntu 20.04) incorrectly tries to avoid invoking
+#   a shell, and fails because there is no /usr/bin/command. The trailing ';' in
+#   $(shell … ;) defeats that heuristic (recommended in
+#   https://savannah.gnu.org/bugs/index.php?57625 ).
+export CONTAINER_RUNTIME ?= $(if $(shell command -v podman ;),podman,docker)
+GOMD2MAN ?= $(if $(shell command -v go-md2man ;),go-md2man,$(GOBIN)/go-md2man)

 # Go module support: set `-mod=vendor` to use the vendored sources.
 # See also hack/make.sh.

@@ -51,8 +55,6 @@ ifeq ($(GOOS), linux)
 endif
 endif

-GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
-
 # If $TESTFLAGS is set, it is passed as extra arguments to 'go test'.
 # You can increase test output verbosity with the option '-test.vv'.
 # You can select certain tests to run, with `-test.run <regex>` for example:

@@ -88,7 +90,7 @@ endif
 CONTAINER_GOSRC = /src/github.com/containers/skopeo
 CONTAINER_RUN ?= $(CONTAINER_CMD) --security-opt label=disable -v $(CURDIR):$(CONTAINER_GOSRC) -w $(CONTAINER_GOSRC) $(SKOPEO_CIDEV_CONTAINER_FQIN)

-GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)
+GIT_COMMIT := $(shell GIT_CEILING_DIRECTORIES=$$(cd ..; pwd) git rev-parse HEAD 2> /dev/null || true)

 EXTRA_LDFLAGS ?=
 SKOPEO_LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} $(EXTRA_LDFLAGS)'

@@ -112,6 +114,9 @@ endif
 # use source debugging tools like delve.
 all: bin/skopeo docs

+codespell:
+	codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L fpr,uint,iff,od,ERRO -w
+
 help:
 	@echo "Usage: make <target>"
 	@echo

@@ -134,7 +139,7 @@ binary: cmd/skopeo
 # Build w/o using containers
 .PHONY: bin/skopeo
 bin/skopeo:
-	$(GPGME_ENV) $(GO) build $(MOD_VENDOR) ${GO_DYN_FLAGS} ${SKOPEO_LDFLAGS} -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o $@ ./cmd/skopeo
+	$(GO) build $(MOD_VENDOR) ${GO_DYN_FLAGS} ${SKOPEO_LDFLAGS} -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o $@ ./cmd/skopeo
 bin/skopeo.%:
 	GOOS=$(word 2,$(subst ., ,$@)) GOARCH=$(word 3,$(subst ., ,$@)) $(GO) build $(MOD_VENDOR) ${SKOPEO_LDFLAGS} -tags "containers_image_openpgp $(BUILDTAGS)" -o $@ ./cmd/skopeo
 local-cross: bin/skopeo.darwin.amd64 bin/skopeo.linux.arm bin/skopeo.linux.arm64 bin/skopeo.windows.386.exe bin/skopeo.windows.amd64.exe

@@ -149,11 +154,19 @@ docs: $(MANPAGES)
 docs-in-container:
	${CONTAINER_RUN} $(MAKE) docs $(if $(DEBUG),DEBUG=$(DEBUG))

+.PHONY: completions
+completions: bin/skopeo
+	install -d -m 755 completions/bash completions/zsh completions/fish completions/powershell
+	./bin/skopeo completion bash >| completions/bash/skopeo
+	./bin/skopeo completion zsh >| completions/zsh/_skopeo
+	./bin/skopeo completion fish >| completions/fish/skopeo.fish
+	./bin/skopeo completion powershell >| completions/powershell/skopeo.ps1
+
 clean:
-	rm -rf bin docs/*.1
+	rm -rf bin docs/*.1 completions/

 install: install-binary install-docs install-completions
-	install -d -m 755 ${DESTDIR}${SIGSTOREDIR}
+	install -d -m 755 ${DESTDIR}${LOOKASIDEDIR}
 	install -d -m 755 ${DESTDIR}${CONTAINERSCONFDIR}
 	install -m 644 default-policy.json ${DESTDIR}${CONTAINERSCONFDIR}/policy.json
 	install -d -m 755 ${DESTDIR}${REGISTRIESDDIR}

@@ -169,9 +182,14 @@ ifneq ($(DISABLE_DOCS), 1)
 	install -m 644 docs/*.1 ${DESTDIR}${MANDIR}/man1
 endif

-install-completions:
-	install -m 755 -d ${DESTDIR}${BASHCOMPLETIONSDIR}
-	install -m 644 completions/bash/skopeo ${DESTDIR}${BASHCOMPLETIONSDIR}/skopeo
+install-completions: completions
+	install -d -m 755 ${DESTDIR}${BASHINSTALLDIR}
+	install -m 644 completions/bash/skopeo ${DESTDIR}${BASHINSTALLDIR}
+	install -d -m 755 ${DESTDIR}${ZSHINSTALLDIR}
+	install -m 644 completions/zsh/_skopeo ${DESTDIR}${ZSHINSTALLDIR}
+	install -d -m 755 ${DESTDIR}${FISHINSTALLDIR}
+	install -m 644 completions/fish/skopeo.fish ${DESTDIR}${FISHINSTALLDIR}
+	# There is no common location for powershell files so do not install them. Users have to source the file from their powershell profile.

 shell:
 	$(CONTAINER_RUN) bash

@@ -224,7 +242,7 @@ validate-docs:
 	hack/xref-helpmsgs-manpages

 test-unit-local: bin/skopeo
-	$(GPGME_ENV) $(GO) test $(MOD_VENDOR) -tags "$(BUILDTAGS)" $$($(GO) list $(MOD_VENDOR) -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/containers/skopeo/\(integration\|vendor/.*\)$$')
+	$(GO) test $(MOD_VENDOR) -tags "$(BUILDTAGS)" $$($(GO) list $(MOD_VENDOR) -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/containers/skopeo/\(integration\|vendor/.*\)$$')

 vendor:
 	$(GO) mod tidy

@@ -232,4 +250,9 @@ vendor:
 	$(GO) mod verify

 vendor-in-container:
-	podman run --privileged --rm --env HOME=/root -v $(CURDIR):/src -w /src quay.io/libpod/golang:1.16 $(MAKE) vendor
+	podman run --privileged --rm --env HOME=/root -v $(CURDIR):/src -w /src golang $(MAKE) vendor
+
+# CAUTION: This is not a replacement for RPMs provided by your distro.
+# Only intended to build and test the latest unreleased changes.
+rpm:
+	rpkg local
README.md

@@ -207,7 +207,7 @@ Please read the [contribution guide](CONTRIBUTING.md) if you want to collaborate
 | [skopeo-manifest-digest(1)](/docs/skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
 | [skopeo-standalone-sign(1)](/docs/skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
 | [skopeo-standalone-verify(1)](/docs/skopeo-standalone-verify.1.md)| Verify an image signature. |
-| [skopeo-sync(1)](/docs/skopeo-sync.1.md) | Synchronize images between container registries and local directories. |
+| [skopeo-sync(1)](/docs/skopeo-sync.1.md) | Synchronize images between registry repositories and local directories. |

 License
 -------
Deleted Go source file (filename not captured in the mirror):

@@ -1,35 +0,0 @@
-//go:build !containers_image_openpgp
-// +build !containers_image_openpgp
-
-package main
-
-/*
-This is a pretty horrible workaround.  Due to a glibc bug
-https://bugzilla.redhat.com/show_bug.cgi?id=1326903 , we must ensure we link
-with -lgpgme before -lpthread.  Such arguments come from various packages
-using cgo, and the ordering of these arguments is, with current (go tool link),
-dependent on the order in which the cgo-using packages are found in a
-breadth-first search following dependencies, starting from “main”.
-
-Thus, if
-	import "net"
-is processed before
-	import "…/skopeo/signature"
-it will, in the next level of the BFS, pull in "runtime/cgo" (a dependency of
-"net") before "mtrmac/gpgme" (a dependency of "…/skopeo/signature"), causing
--lpthread (used by "runtime/cgo") to be used before -lgpgme.
-
-This might be possible to work around by careful import ordering, or by removing
-a direct dependency on "net", but that would be very fragile.
-
-So, until the above bug is fixed, add -lgpgme directly in the "main" package
-to ensure the needed build order.
-
-Unfortunately, this workaround needs to be applied at the top level of any user
-of "…/skopeo/signature"; it cannot be added to "…/skopeo/signature" itself,
-by that time this package is first processed by the linker, a -lpthread may
-already be queued and it would be too late.
-*/
-
-// #cgo LDFLAGS: -lgpgme
-import "C"
cmd/skopeo/completions.go (new file, 16 lines)

@@ -0,0 +1,16 @@
+package main
+
+import (
+	"github.com/containers/image/v5/transports"
+	"github.com/spf13/cobra"
+)
+
+// autocompleteSupportedTransports list all supported transports with the colon suffix.
+func autocompleteSupportedTransports(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	tps := transports.ListNames()
+	suggestions := make([]string, 0, len(tps))
+	for _, tp := range tps {
+		suggestions = append(suggestions, tp+":")
+	}
+	return suggestions, cobra.ShellCompDirectiveNoFileComp
+}
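The new helper above only produces suggestion strings; it takes effect once a command registers it through cobra's ValidArgsFunction field, which the copy, delete, and inspect diffs below do. A minimal self-contained sketch of that wiring (the command name and transport values here are invented for illustration, not skopeo's):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// demoTransports stands in for transports.ListNames(); values are illustrative only.
var demoTransports = []string{"containers-storage", "dir", "docker"}

// completeTransports suggests "<transport>:" prefixes for the positional argument.
func completeTransports(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	suggestions := make([]string, 0, len(demoTransports))
	for _, tp := range demoTransports {
		suggestions = append(suggestions, tp+":")
	}
	// NoFileComp: do not fall back to filename completion.
	return suggestions, cobra.ShellCompDirectiveNoFileComp
}

func main() {
	cmd := &cobra.Command{
		Use:               "demo IMAGE",
		ValidArgsFunction: completeTransports, // cobra calls this via its hidden __complete subcommand
		Run:               func(cmd *cobra.Command, args []string) { fmt.Println(args) },
	}
	_ = cmd.Execute()
}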
cmd/skopeo/copy.go

@@ -4,7 +4,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
+	"os"
 	"strings"

 	commonFlag "github.com/containers/common/pkg/flag"

@@ -12,6 +12,7 @@ import (
 	"github.com/containers/image/v5/copy"
 	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/pkg/cli"
 	"github.com/containers/image/v5/transports"
 	"github.com/containers/image/v5/transports/alltransports"
 	encconfig "github.com/containers/ocicrypt/config"

@@ -20,21 +21,26 @@ import (
 )

 type copyOptions struct {
-	global              *globalOptions
-	deprecatedTLSVerify *deprecatedTLSVerifyOption
-	srcImage            *imageOptions
-	destImage           *imageDestOptions
-	retryOpts           *retry.RetryOptions
-	additionalTags      []string // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
-	removeSignatures    bool     // Do not copy signatures from the source image
-	signByFingerprint   string   // Sign the image using a GPG key with the specified fingerprint
-	digestFile          string   // Write digest to this file
-	format              commonFlag.OptionalString // Force conversion of the image to a specified format
-	quiet               bool     // Suppress output information when copying images
-	all                 bool     // Copy all of the images if the source is a list
-	encryptLayer        []int    // The list of layers to encrypt
-	encryptionKeys      []string // Keys needed to encrypt the image
-	decryptionKeys      []string // Keys needed to decrypt the image
+	global                   *globalOptions
+	deprecatedTLSVerify      *deprecatedTLSVerifyOption
+	srcImage                 *imageOptions
+	destImage                *imageDestOptions
+	retryOpts                *retry.Options
+	additionalTags           []string // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
+	removeSignatures         bool     // Do not copy signatures from the source image
+	signByFingerprint        string   // Sign the image using a GPG key with the specified fingerprint
+	signBySigstorePrivateKey string   // Sign the image using a sigstore private key
+	signPassphraseFile       string   // Path pointing to a passphrase file when signing (for either signature format, but only one of them)
+	signIdentity             string   // Identity of the signed image, must be a fully specified docker reference
+	digestFile               string   // Write digest to this file
+	format                   commonFlag.OptionalString // Force conversion of the image to a specified format
+	quiet                    bool     // Suppress output information when copying images
+	all                      bool     // Copy all of the images if the source is a list
+	multiArch                commonFlag.OptionalString // How to handle multi architecture images
+	preserveDigests          bool     // Preserve digests during copy
+	encryptLayer             []int    // The list of layers to encrypt
+	encryptionKeys           []string // Keys needed to encrypt the image
+	decryptionKeys           []string // Keys needed to decrypt the image
 }

 func copyCmd(global *globalOptions) *cobra.Command {

@@ -59,8 +65,9 @@ Supported transports:

 See skopeo(1) section "IMAGE NAMES" for the expected format
 `, strings.Join(transports.ListNames(), ", ")),
-		RunE:    commandAction(opts.run),
-		Example: `skopeo copy docker://quay.io/skopeo/stable:latest docker://registry.example.com/skopeo:latest`,
+		RunE:              commandAction(opts.run),
+		Example:           `skopeo copy docker://quay.io/skopeo/stable:latest docker://registry.example.com/skopeo:latest`,
+		ValidArgsFunction: autocompleteSupportedTransports,
 	}
 	adjustUsage(cmd)
 	flags := cmd.Flags()

@@ -72,8 +79,13 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
 	flags.StringSliceVar(&opts.additionalTags, "additional-tag", []string{}, "additional tags (supports docker-archive)")
 	flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress output information when copying images")
 	flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
+	flags.Var(commonFlag.NewOptionalStringValue(&opts.multiArch), "multi-arch", `How to handle multi-architecture images (system, all, or index-only)`)
+	flags.BoolVar(&opts.preserveDigests, "preserve-digests", false, "Preserve digests of images and lists")
 	flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE-IMAGE")
 	flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
+	flags.StringVar(&opts.signBySigstorePrivateKey, "sign-by-sigstore-private-key", "", "Sign the image using a sigstore private key at `PATH`")
+	flags.StringVar(&opts.signPassphraseFile, "sign-passphrase-file", "", "Read a passphrase for signing an image from `PATH`")
+	flags.StringVar(&opts.signIdentity, "sign-identity", "", "Identity of signed image, must be a fully specified docker reference. Defaults to the target docker reference.")
 	flags.StringVar(&opts.digestFile, "digestfile", "", "Write the digest of the pushed image to the specified file")
 	flags.VarP(commonFlag.NewOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use in the destination (default is manifest type of source, with fallbacks)`)
 	flags.StringSliceVar(&opts.encryptionKeys, "encryption-key", []string{}, "*Experimental* key with the encryption protocol to use needed to encrypt the image (e.g. jwe:/path/to/key.pem)")

@@ -82,7 +94,28 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
 	return cmd
 }

-func (opts *copyOptions) run(args []string, stdout io.Writer) error {
+// parseMultiArch parses the list processing selection
+// It returns the copy.ImageListSelection to use with image.Copy option
+func parseMultiArch(multiArch string) (copy.ImageListSelection, error) {
+	switch multiArch {
+	case "system":
+		return copy.CopySystemImage, nil
+	case "all":
+		return copy.CopyAllImages, nil
+	// There is no CopyNoImages value in copy.ImageListSelection, but because we
+	// don't provide an option to select a set of images to copy, we can use
+	// CopySpecificImages.
+	case "index-only":
+		return copy.CopySpecificImages, nil
+	// We don't expose CopySpecificImages other than index-only above, because
+	// we currently don't provide an option to choose the images to copy. That
+	// could be added in the future.
+	default:
+		return copy.CopySystemImage, fmt.Errorf("unknown multi-arch option %q. Choose one of the supported options: 'system', 'all', or 'index-only'", multiArch)
+	}
+}
+
+func (opts *copyOptions) run(args []string, stdout io.Writer) (retErr error) {
 	if len(args) != 2 {
 		return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
 	}

@@ -97,7 +130,11 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
 	if err != nil {
 		return fmt.Errorf("Error loading trust policy: %v", err)
 	}
-	defer policyContext.Destroy()
+	defer func() {
+		if err := policyContext.Destroy(); err != nil {
+			retErr = noteCloseFailure(retErr, "tearing down policy context", err)
+		}
+	}()

 	srcRef, err := alltransports.ParseImageName(imageNames[0])
 	if err != nil {

@@ -143,7 +180,17 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
 	if opts.quiet {
 		stdout = nil
 	}

 	imageListSelection := copy.CopySystemImage
+	if opts.multiArch.Present() && opts.all {
+		return fmt.Errorf("Cannot use --all and --multi-arch flags together")
+	}
+	if opts.multiArch.Present() {
+		imageListSelection, err = parseMultiArch(opts.multiArch.Value())
+		if err != nil {
+			return err
+		}
+	}
 	if opts.all {
 		imageListSelection = copy.CopyAllImages
 	}

@@ -184,18 +231,54 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
 		decConfig = cc.DecryptConfig
 	}

-	return retry.RetryIfNecessary(ctx, func() error {
+	// c/image/copy.Image does allow creating both simple signing and sigstore signatures simultaneously,
+	// with independent passphrases, but that would make the CLI probably too confusing.
+	// For now, use the passphrase with either, but only one of them.
+	if opts.signPassphraseFile != "" && opts.signByFingerprint != "" && opts.signBySigstorePrivateKey != "" {
+		return fmt.Errorf("Only one of --sign-by and sign-by-sigstore-private-key can be used with sign-passphrase-file")
+	}
+	var passphrase string
+	if opts.signPassphraseFile != "" {
+		p, err := cli.ReadPassphraseFile(opts.signPassphraseFile)
+		if err != nil {
+			return err
+		}
+		passphrase = p
+	} else if opts.signBySigstorePrivateKey != "" {
+		p, err := promptForPassphrase(opts.signBySigstorePrivateKey, os.Stdin, os.Stdout)
+		if err != nil {
+			return err
+		}
+		passphrase = p
+	} // opts.signByFingerprint triggers a GPG-agent passphrase prompt, possibly using a more secure channel, so we usually shouldn’t prompt ourselves if no passphrase was explicitly provided.
+
+	var signIdentity reference.Named = nil
+	if opts.signIdentity != "" {
+		signIdentity, err = reference.ParseNamed(opts.signIdentity)
+		if err != nil {
+			return fmt.Errorf("Could not parse --sign-identity: %v", err)
+		}
+	}
+
+	opts.destImage.warnAboutIneffectiveOptions(destRef.Transport())
+
+	return retry.IfNecessary(ctx, func() error {
 		manifestBytes, err := copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
-			RemoveSignatures:      opts.removeSignatures,
-			SignBy:                opts.signByFingerprint,
-			ReportWriter:          stdout,
-			SourceCtx:             sourceCtx,
-			DestinationCtx:        destinationCtx,
-			ForceManifestMIMEType: manifestType,
-			ImageListSelection:    imageListSelection,
-			OciDecryptConfig:      decConfig,
-			OciEncryptLayers:      encLayers,
-			OciEncryptConfig:      encConfig,
+			RemoveSignatures:                 opts.removeSignatures,
+			SignBy:                           opts.signByFingerprint,
+			SignPassphrase:                   passphrase,
+			SignBySigstorePrivateKeyFile:     opts.signBySigstorePrivateKey,
+			SignSigstorePrivateKeyPassphrase: []byte(passphrase),
+			SignIdentity:                     signIdentity,
+			ReportWriter:                     stdout,
+			SourceCtx:                        sourceCtx,
+			DestinationCtx:                   destinationCtx,
+			ForceManifestMIMEType:            manifestType,
+			ImageListSelection:               imageListSelection,
+			PreserveDigests:                  opts.preserveDigests,
+			OciDecryptConfig:                 decConfig,
+			OciEncryptLayers:                 encLayers,
+			OciEncryptConfig:                 encConfig,
 		})
 		if err != nil {
 			return err

@@ -205,7 +288,7 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
 		if err != nil {
 			return err
 		}
-		if err = ioutil.WriteFile(opts.digestFile, []byte(manifestDigest.String()), 0644); err != nil {
+		if err = os.WriteFile(opts.digestFile, []byte(manifestDigest.String()), 0644); err != nil {
 			return fmt.Errorf("Failed to write digest to file %q: %w", opts.digestFile, err)
 		}
 	}
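The reworked cleanup paths above call a noteCloseFailure helper that this diff never shows. The following is a guess at its likely shape, not skopeo's actual implementation: it reports a Close failure without discarding the primary error.

package main

import "fmt"

// noteCloseFailure combines a primary error with a cleanup (Close) failure so
// that neither is silently dropped. Hypothetical reconstruction; the real
// helper in skopeo may differ.
func noteCloseFailure(err error, description string, closeErr error) error {
	if err == nil {
		return fmt.Errorf("%s: %w", description, closeErr)
	}
	// Keep err as the wrapped (primary) error; record the close failure as text.
	return fmt.Errorf("%w (%s: %v)", err, description, closeErr)
}

func main() {
	primary := fmt.Errorf("copy failed")
	fmt.Println(noteCloseFailure(primary, "tearing down policy context", fmt.Errorf("busy")))
	// Prints: copy failed (tearing down policy context: busy)
}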
cmd/skopeo/delete.go

@@ -15,7 +15,7 @@ import (
 type deleteOptions struct {
 	global    *globalOptions
 	image     *imageOptions
-	retryOpts *retry.RetryOptions
+	retryOpts *retry.Options
 }

 func deleteCmd(global *globalOptions) *cobra.Command {

@@ -35,8 +35,9 @@ Supported transports:
 %s

 See skopeo(1) section "IMAGE NAMES" for the expected format
 `, strings.Join(transports.ListNames(), ", ")),
-		RunE:    commandAction(opts.run),
-		Example: `skopeo delete docker://registry.example.com/example/pause:latest`,
+		RunE:              commandAction(opts.run),
+		Example:           `skopeo delete docker://registry.example.com/example/pause:latest`,
+		ValidArgsFunction: autocompleteSupportedTransports,
 	}
 	adjustUsage(cmd)
 	flags := cmd.Flags()

@@ -69,7 +70,7 @@ func (opts *deleteOptions) run(args []string, stdout io.Writer) error {
 	ctx, cancel := opts.global.commandTimeoutContext()
 	defer cancel()

-	return retry.RetryIfNecessary(ctx, func() error {
+	return retry.IfNecessary(ctx, func() error {
 		return ref.DeleteImage(ctx, sys)
 	}, opts.retryOpts)
 }
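This file, like the others in the diff, switches from retry.RetryIfNecessary and retry.RetryOptions to the renamed retry.IfNecessary and retry.Options, presumably from github.com/containers/common/pkg/retry. The call shape is unchanged; a sketch of typical usage follows, where doFetch is a hypothetical stand-in and the field names are assumed from that package (note the package only retries errors it classifies as transient, such as network timeouts):

package main

import (
	"context"
	"time"

	"github.com/containers/common/pkg/retry"
)

// doFetch is a hypothetical operation that may fail transiently.
func doFetch(ctx context.Context) error { return nil }

func main() {
	ctx := context.Background()
	opts := &retry.Options{
		MaxRetry: 3,           // give up after 3 retries
		Delay:    time.Second, // wait between attempts
	}
	// IfNecessary re-runs the closure only for errors the package deems retryable.
	if err := retry.IfNecessary(ctx, func() error {
		return doFetch(ctx)
	}, opts); err != nil {
		panic(err)
	}
}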
cmd/skopeo/inspect.go

@@ -2,6 +2,7 @@ package main

 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"os"

@@ -18,7 +19,6 @@ import (
 	"github.com/containers/image/v5/types"
 	"github.com/containers/skopeo/cmd/skopeo/inspect"
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )

@@ -26,7 +26,7 @@ import (
 type inspectOptions struct {
 	global    *globalOptions
 	image     *imageOptions
-	retryOpts *retry.RetryOptions
+	retryOpts *retry.Options
 	format    string
 	raw       bool // Output the raw manifest instead of parsing information about the image
 	config    bool // Output the raw config blob instead of parsing information about the image

@@ -55,6 +55,7 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
 		Example: `skopeo inspect docker://registry.fedoraproject.org/fedora
   skopeo inspect --config docker://docker.io/alpine
   skopeo inspect --format "Name: {{.Name}} Digest: {{.Digest}}" docker://registry.access.redhat.com/ubi8`,
+		ValidArgsFunction: autocompleteSupportedTransports,
 	}
 	adjustUsage(cmd)
 	flags := cmd.Flags()

@@ -95,30 +96,30 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
 		return err
 	}

-	if err := retry.RetryIfNecessary(ctx, func() error {
+	if err := retry.IfNecessary(ctx, func() error {
 		src, err = parseImageSource(ctx, opts.image, imageName)
 		return err
 	}, opts.retryOpts); err != nil {
-		return errors.Wrapf(err, "Error parsing image name %q", imageName)
+		return fmt.Errorf("Error parsing image name %q: %w", imageName, err)
 	}

 	defer func() {
 		if err := src.Close(); err != nil {
-			retErr = errors.Wrapf(retErr, fmt.Sprintf("(could not close image: %v) ", err))
+			retErr = noteCloseFailure(retErr, "closing image", err)
 		}
 	}()

-	if err := retry.RetryIfNecessary(ctx, func() error {
+	if err := retry.IfNecessary(ctx, func() error {
 		rawManifest, _, err = src.GetManifest(ctx, nil)
 		return err
 	}, opts.retryOpts); err != nil {
-		return errors.Wrapf(err, "Error retrieving manifest for image")
+		return fmt.Errorf("Error retrieving manifest for image: %w", err)
 	}

 	if opts.raw && !opts.config {
 		_, err := stdout.Write(rawManifest)
 		if err != nil {
-			return fmt.Errorf("Error writing manifest to standard output: %v", err)
+			return fmt.Errorf("Error writing manifest to standard output: %w", err)
 		}

 		return nil

@@ -126,29 +127,29 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)

 	img, err := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(src, nil))
 	if err != nil {
-		return errors.Wrapf(err, "Error parsing manifest for image")
+		return fmt.Errorf("Error parsing manifest for image: %w", err)
 	}

 	if opts.config && opts.raw {
 		var configBlob []byte
-		if err := retry.RetryIfNecessary(ctx, func() error {
+		if err := retry.IfNecessary(ctx, func() error {
 			configBlob, err = img.ConfigBlob(ctx)
 			return err
 		}, opts.retryOpts); err != nil {
-			return errors.Wrapf(err, "Error reading configuration blob")
+			return fmt.Errorf("Error reading configuration blob: %w", err)
 		}
 		_, err = stdout.Write(configBlob)
 		if err != nil {
-			return errors.Wrapf(err, "Error writing configuration blob to standard output")
+			return fmt.Errorf("Error writing configuration blob to standard output: %w", err)
 		}
 		return nil
 	} else if opts.config {
 		var config *v1.Image
-		if err := retry.RetryIfNecessary(ctx, func() error {
+		if err := retry.IfNecessary(ctx, func() error {
 			config, err = img.OCIConfig(ctx)
 			return err
 		}, opts.retryOpts); err != nil {
-			return errors.Wrapf(err, "Error reading OCI-formatted configuration data")
+			return fmt.Errorf("Error reading OCI-formatted configuration data: %w", err)
 		}
 		if report.IsJSON(opts.format) || opts.format == "" {
 			var out []byte

@@ -162,12 +163,12 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
 			err = printTmpl(row, data)
 		}
 		if err != nil {
-			return errors.Wrapf(err, "Error writing OCI-formatted configuration data to standard output")
+			return fmt.Errorf("Error writing OCI-formatted configuration data to standard output: %w", err)
 		}
 		return nil
 	}

-	if err := retry.RetryIfNecessary(ctx, func() error {
+	if err := retry.IfNecessary(ctx, func() error {
 		imgInspect, err = img.Inspect(ctx)
 		return err
 	}, opts.retryOpts); err != nil {

@@ -185,11 +186,12 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
 		Architecture: imgInspect.Architecture,
 		Os:           imgInspect.Os,
 		Layers:       imgInspect.Layers,
+		LayersData:   imgInspect.LayersData,
 		Env:          imgInspect.Env,
 	}
 	outputData.Digest, err = manifest.Digest(rawManifest)
 	if err != nil {
-		return errors.Wrapf(err, "Error computing manifest digest")
+		return fmt.Errorf("Error computing manifest digest: %w", err)
 	}
 	if dockerRef := img.Reference().DockerReference(); dockerRef != nil {
 		outputData.Name = dockerRef.Name()

@@ -207,7 +209,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
 		// In addition, AWS ECR rejects it with 403 (Forbidden) if the "ecr:ListImages"
 		// action is not allowed.
 		if !strings.Contains(err.Error(), "401") && !strings.Contains(err.Error(), "403") {
-			return errors.Wrapf(err, "Error determining repository tags")
+			return fmt.Errorf("Error determining repository tags: %w", err)
 		}
 		logrus.Warnf("Registry disallows tag list retrieval; skipping")
 	}
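The inspect.go changes above replace errors.Wrapf from github.com/pkg/errors with fmt.Errorf and the %w verb. A small standalone illustration of why %w rather than %v matters: %w keeps the cause on the error chain, so errors.Is and errors.As still match.

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("/nonexistent")

	wrapped := fmt.Errorf("Error retrieving manifest for image: %w", err)
	flattened := fmt.Errorf("Error retrieving manifest for image: %v", err)

	fmt.Println(errors.Is(wrapped, fs.ErrNotExist))   // true: %w preserves the chain
	fmt.Println(errors.Is(flattened, fs.ErrNotExist)) // false: %v flattens the cause to text
}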
@@ -3,6 +3,7 @@ package inspect
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/v5/types"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
@@ -19,5 +20,6 @@ type Output struct {
|
||||
Architecture string
|
||||
Os string
|
||||
Layers []string
|
||||
LayersData []types.ImageInspectLayer
|
||||
Env []string
|
||||
}
|
||||
|
||||
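The hunks above switch every call site from the old `retry.RetryIfNecessary`/`retry.RetryOptions` names to the renamed `retry.IfNecessary`/`retry.Options` in containers/common. A minimal sketch of the new call pattern, assuming the field names `MaxRetry` and `Delay` from containers/common/pkg/retry; `fetchManifest` is a hypothetical stand-in for the `img.Inspect`/`img.ConfigBlob` operations wrapped above:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/containers/common/pkg/retry"
)

// fetchManifest stands in for a registry operation that may fail transiently.
func fetchManifest(ctx context.Context) ([]byte, error) {
	return nil, fmt.Errorf("transient registry error")
}

func main() {
	ctx := context.Background()
	var manifest []byte
	// retry.IfNecessary re-runs the closure on retryable errors,
	// up to MaxRetry attempts, pausing Delay between attempts.
	err := retry.IfNecessary(ctx, func() error {
		var err error
		manifest, err = fetchManifest(ctx)
		return err
	}, &retry.Options{MaxRetry: 3, Delay: 500 * time.Millisecond})
	if err != nil {
		fmt.Println("giving up:", err)
	}
	_ = manifest
}
```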
@@ -1,9 +1,9 @@
package main

import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"strings"

@@ -13,14 +13,13 @@ import (
"github.com/containers/image/v5/pkg/blobinfocache"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)

type layersOptions struct {
global *globalOptions
image *imageOptions
retryOpts *retry.RetryOptions
retryOpts *retry.Options
}

func layersCmd(global *globalOptions) *cobra.Command {
@@ -69,25 +68,25 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
rawSource types.ImageSource
src types.ImageCloser
)
if err = retry.RetryIfNecessary(ctx, func() error {
if err = retry.IfNecessary(ctx, func() error {
rawSource, err = parseImageSource(ctx, opts.image, imageName)
return err
}, opts.retryOpts); err != nil {
return err
}
if err = retry.RetryIfNecessary(ctx, func() error {
if err = retry.IfNecessary(ctx, func() error {
src, err = image.FromSource(ctx, sys, rawSource)
return err
}, opts.retryOpts); err != nil {
if closeErr := rawSource.Close(); closeErr != nil {
return errors.Wrapf(err, " (close error: %v)", closeErr)
return fmt.Errorf("%w (closing image source: %v)", err, closeErr)
}

return err
}
defer func() {
if err := src.Close(); err != nil {
retErr = errors.Wrapf(retErr, " (close error: %v)", err)
retErr = noteCloseFailure(retErr, "closing image", err)
}
}()

@@ -122,7 +121,7 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
}
}

tmpDir, err := ioutil.TempDir(".", "layers-")
tmpDir, err := os.MkdirTemp(".", "layers-")
if err != nil {
return err
}
@@ -137,7 +136,7 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {

defer func() {
if err := dest.Close(); err != nil {
retErr = errors.Wrapf(retErr, " (close error: %v)", err)
retErr = noteCloseFailure(retErr, "closing destination", err)
}
}()

@@ -146,7 +145,7 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
r io.ReadCloser
blobSize int64
)
if err = retry.RetryIfNecessary(ctx, func() error {
if err = retry.IfNecessary(ctx, func() error {
r, blobSize, err = rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1}, cache)
return err
}, opts.retryOpts); err != nil {
@@ -154,14 +153,14 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
}
if _, err := dest.PutBlob(ctx, r, types.BlobInfo{Digest: bd.digest, Size: blobSize}, cache, bd.isConfig); err != nil {
if closeErr := r.Close(); closeErr != nil {
return errors.Wrapf(err, " (close error: %v)", closeErr)
return fmt.Errorf("%w (close error: %v)", err, closeErr)
}
return err
}
}

var manifest []byte
if err = retry.RetryIfNecessary(ctx, func() error {
if err = retry.IfNecessary(ctx, func() error {
manifest, _, err = src.Manifest(ctx)
return err
}, opts.retryOpts); err != nil {
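The deferred-close hunks above rely on a named return value so a failed `Close` can still surface. A minimal sketch of that pattern, assuming a hypothetical `copyBlob` helper; skopeo's `noteCloseFailure` (introduced in a later hunk in utils.go) wraps exactly this annotate-don't-drop logic:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// copyBlob uses a named return value (retErr) so the deferred Close hook
// can annotate the error run() is already returning instead of losing
// the close failure entirely.
func copyBlob(src io.ReadCloser) (retErr error) {
	defer func() {
		if err := src.Close(); err != nil {
			if retErr == nil {
				retErr = fmt.Errorf("closing source: %w", err)
			} else {
				retErr = fmt.Errorf("%w (closing source: %v)", retErr, err)
			}
		}
	}()
	_, err := io.ReadAll(src)
	return err
}

func main() {
	f, err := os.Open("/etc/hostname")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(copyBlob(f))
}
```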
@@ -3,29 +3,46 @@ package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"sort"
"strings"

"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/archive"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)

// tagListOutput is the output format of (skopeo list-tags), primarily so that we can format it with a simple json.MarshalIndent.
type tagListOutput struct {
Repository string
Repository string `json:",omitempty"`
Tags []string
}

type tagsOptions struct {
global *globalOptions
image *imageOptions
retryOpts *retry.RetryOptions
retryOpts *retry.Options
}

var transportHandlers = map[string]func(ctx context.Context, sys *types.SystemContext, opts *tagsOptions, userInput string) (repositoryName string, tagListing []string, err error){
docker.Transport.Name(): listDockerRepoTags,
archive.Transport.Name(): listDockerArchiveTags,
}

// supportedTransports returns all the supported transports
func supportedTransports(joinStr string) string {
res := make([]string, 0, len(transportHandlers))
for handlerName := range transportHandlers {
res = append(res, handlerName)
}
sort.Strings(res)
return strings.Join(res, joinStr)
}

func tagsCmd(global *globalOptions) *cobra.Command {
@@ -38,13 +55,14 @@ func tagsCmd(global *globalOptions) *cobra.Command {
image: imageOpts,
retryOpts: retryOpts,
}

cmd := &cobra.Command{
Use: "list-tags [command options] REPOSITORY-NAME",
Short: "List tags in the transport/repository specified by the REPOSITORY-NAME",
Long: `Return the list of tags from the transport/repository "REPOSITORY-NAME"
Use: "list-tags [command options] SOURCE-IMAGE",
Short: "List tags in the transport/repository specified by the SOURCE-IMAGE",
Long: `Return the list of tags from the transport/repository "SOURCE-IMAGE"

Supported transports:
docker
` + supportedTransports(" ") + `

See skopeo-list-tags(1) section "REPOSITORY NAMES" for the expected format
`,
@@ -63,12 +81,12 @@ See skopeo-list-tags(1) section "REPOSITORY NAMES" for the expected format
// Would really love to not have this, but needed to enforce tag-less and digest-less names
func parseDockerRepositoryReference(refString string) (types.ImageReference, error) {
if !strings.HasPrefix(refString, docker.Transport.Name()+"://") {
return nil, errors.Errorf("docker: image reference %s does not start with %s://", refString, docker.Transport.Name())
return nil, fmt.Errorf("docker: image reference %s does not start with %s://", refString, docker.Transport.Name())
}

parts := strings.SplitN(refString, ":", 2)
if len(parts) != 2 {
return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, refString)
return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, refString)
}

ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(parts[1], "//"))
@@ -90,11 +108,63 @@ func listDockerTags(ctx context.Context, sys *types.SystemContext, imgRef types.

tags, err := docker.GetRepositoryTags(ctx, sys, imgRef)
if err != nil {
return ``, nil, fmt.Errorf("Error listing repository tags: %v", err)
return ``, nil, fmt.Errorf("Error listing repository tags: %w", err)
}
return repositoryName, tags, nil
}

// return the tagLists from a docker repo
func listDockerRepoTags(ctx context.Context, sys *types.SystemContext, opts *tagsOptions, userInput string) (repositoryName string, tagListing []string, err error) {
// Do transport-specific parsing and validation to get an image reference
imgRef, err := parseDockerRepositoryReference(userInput)
if err != nil {
return
}
if err = retry.IfNecessary(ctx, func() error {
repositoryName, tagListing, err = listDockerTags(ctx, sys, imgRef)
return err
}, opts.retryOpts); err != nil {
return
}
return
}

// return the tagLists from a docker archive file
func listDockerArchiveTags(ctx context.Context, sys *types.SystemContext, opts *tagsOptions, userInput string) (repositoryName string, tagListing []string, err error) {
ref, err := alltransports.ParseImageName(userInput)
if err != nil {
return
}

tarReader, _, err := archive.NewReaderForReference(sys, ref)
if err != nil {
return
}
defer tarReader.Close()

imageRefs, err := tarReader.List()
if err != nil {
return
}

var repoTags []string
for imageIndex, items := range imageRefs {
for _, ref := range items {
repoTags, err = tarReader.ManifestTagsForReference(ref)
if err != nil {
return
}
// handle for each untagged image
if len(repoTags) == 0 {
repoTags = []string{fmt.Sprintf("@%d", imageIndex)}
}
tagListing = append(tagListing, repoTags...)
}
}

return
}

func (opts *tagsOptions) run(args []string, stdout io.Writer) (retErr error) {
ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()
@@ -113,23 +183,17 @@ func (opts *tagsOptions) run(args []string, stdout io.Writer) (retErr error) {
return fmt.Errorf("Invalid %q: does not specify a transport", args[0])
}

if transport.Name() != docker.Transport.Name() {
return fmt.Errorf("Unsupported transport '%v' for tag listing. Only '%v' currently supported", transport.Name(), docker.Transport.Name())
}

// Do transport-specific parsing and validation to get an image reference
imgRef, err := parseDockerRepositoryReference(args[0])
if err != nil {
return err
}

var repositoryName string
var tagListing []string
if err = retry.RetryIfNecessary(ctx, func() error {
repositoryName, tagListing, err = listDockerTags(ctx, sys, imgRef)
return err
}, opts.retryOpts); err != nil {
return err

if val, ok := transportHandlers[transport.Name()]; ok {
repositoryName, tagListing, err = val(ctx, sys, opts, args[0])
if err != nil {
return err
}
} else {
return fmt.Errorf("Unsupported transport '%s' for tag listing. Only supported: %s",
transport.Name(), supportedTransports(", "))
}

outputData := tagListOutput{
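The list-tags rewrite above replaces a hard-coded docker-only check with dispatch through the `transportHandlers` map, so new transports only need a map entry. A reduced model of that dispatch, with hypothetical handler bodies standing in for `listDockerRepoTags`/`listDockerArchiveTags`:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// handler mirrors the shape of the transportHandlers values above,
// stripped down to the essentials for this sketch.
type handler func(userInput string) (repository string, tags []string, err error)

var handlers = map[string]handler{
	"docker":         func(in string) (string, []string, error) { return in, []string{"latest"}, nil },
	"docker-archive": func(in string) (string, []string, error) { return "", []string{"@0"}, nil },
}

// supported joins the sorted handler names, like supportedTransports above.
func supported(join string) string {
	names := make([]string, 0, len(handlers))
	for n := range handlers {
		names = append(names, n)
	}
	sort.Strings(names)
	return strings.Join(names, join)
}

func main() {
	transport := "docker"
	if h, ok := handlers[transport]; ok {
		repo, tags, _ := h("docker://registry.example.com/busybox")
		fmt.Println(repo, tags)
	} else {
		fmt.Printf("Unsupported transport '%s'. Only supported: %s\n", transport, supported(", "))
	}
}
```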
@@ -63,11 +63,8 @@ func createApp() (*cobra.Command, *globalOptions) {
},
SilenceUsage: true,
SilenceErrors: true,
// Currently, skopeo uses manually written completions. Cobra allows
// for auto-generating completions for various shells. Podman is
// already making use of that. If Skopeo decides to follow, please
// remove the line below (and hide the `completion` command).
CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
// Hide the completion command which is provided by cobra
CompletionOptions: cobra.CompletionOptions{HiddenDefaultCmd: true},
// This is documented to parse "local" (non-PersistentFlags) flags of parent commands before
// running subcommands and handling their options. We don't really run into such cases,
// because all of our flags on rootCommand are in PersistentFlags, except for the deprecated --tls-verify;
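The hunk above moves from disabling cobra's generated `completion` command to merely hiding it. A minimal sketch of the difference, assuming only the cobra API shown in the diff:

```go
package main

import "github.com/spf13/cobra"

func main() {
	rootCmd := &cobra.Command{
		Use: "tool",
		// HiddenDefaultCmd keeps cobra's auto-generated `completion`
		// subcommand functional (unlike DisableDefaultCmd, which removes
		// it entirely) while leaving it out of `tool help` output.
		CompletionOptions: cobra.CompletionOptions{HiddenDefaultCmd: true},
	}
	_ = rootCmd.Execute()
}
```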
@@ -4,7 +4,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"

"github.com/containers/image/v5/manifest"
"github.com/spf13/cobra"
@@ -31,7 +31,7 @@ func (opts *manifestDigestOptions) run(args []string, stdout io.Writer) error {
}
manifestPath := args[0]

man, err := ioutil.ReadFile(manifestPath)
man, err := os.ReadFile(manifestPath)
if err != nil {
return fmt.Errorf("Error reading manifest from %s: %v", manifestPath, err)
}
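Several files in this compare apply the same Go 1.16 migration away from the deprecated io/ioutil package. A short sketch of the replacements used throughout these hunks (the file name here is illustrative):

```go
package main

import (
	"fmt"
	"os"
)

// Go 1.16+ moved the io/ioutil helpers into os:
//   ioutil.ReadFile  -> os.ReadFile
//   ioutil.WriteFile -> os.WriteFile
//   ioutil.TempDir   -> os.MkdirTemp
//   ioutil.TempFile  -> os.CreateTemp
func main() {
	if err := os.WriteFile("manifest.json", []byte("{}"), 0644); err != nil {
		panic(err)
	}
	man, err := os.ReadFile("manifest.json")
	if err != nil {
		fmt.Printf("Error reading manifest from %s: %v\n", "manifest.json", err)
		return
	}
	fmt.Println(string(man))
}
```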
@@ -87,7 +87,8 @@ import (
//
// 0.2.1: Initial version
// 0.2.2: Added support for fetching image configuration as OCI
const protocolVersion = "0.2.2"
// 0.2.3: Added GetFullConfig
const protocolVersion = "0.2.3"

// maxMsgSize is the current limit on a packet size.
// Note that all non-metadata (i.e. payload data) is sent over a pipe.
@@ -97,13 +98,13 @@ const maxMsgSize = 32 * 1024
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER
// We hard error if the input JSON numbers we expect to be
// integers are above this.
const maxJSONFloat = float64(1<<53 - 1)
const maxJSONFloat = float64(uint64(1)<<53 - 1)

// request is the JSON serialization of a function call
type request struct {
// Method is the name of the function
Method string `json:"method"`
// Args is the arguments (parsed inside the fuction)
// Args is the arguments (parsed inside the function)
Args []interface{} `json:"args"`
}

@@ -430,7 +431,45 @@ func (h *proxyHandler) GetManifest(args []interface{}) (replyBuf, error) {
return h.returnBytes(digest, serialized)
}

// GetConfig returns a copy of the image configuration, converted to OCI format.
// GetFullConfig returns a copy of the image configuration, converted to OCI format.
// https://github.com/opencontainers/image-spec/blob/main/config.md
func (h *proxyHandler) GetFullConfig(args []interface{}) (replyBuf, error) {
h.lock.Lock()
defer h.lock.Unlock()

var ret replyBuf

if h.sysctx == nil {
return ret, fmt.Errorf("client error: must invoke Initialize")
}
if len(args) != 1 {
return ret, fmt.Errorf("invalid request, expecting: [imgid]")
}
imgref, err := h.parseImageFromID(args[0])
if err != nil {
return ret, err
}
err = h.cacheTargetManifest(imgref)
if err != nil {
return ret, err
}
img := imgref.cachedimg

ctx := context.TODO()
config, err := img.OCIConfig(ctx)
if err != nil {
return ret, err
}
serialized, err := json.Marshal(&config)
if err != nil {
return ret, err
}
return h.returnBytes(nil, serialized)
}

// GetConfig returns a copy of the container runtime configuration, converted to OCI format.
// Note that due to a historical mistake, this returns not the full image configuration,
// but just the container runtime configuration. You should use GetFullConfig instead.
func (h *proxyHandler) GetConfig(args []interface{}) (replyBuf, error) {
h.lock.Lock()
defer h.lock.Unlock()
@@ -463,7 +502,6 @@ func (h *proxyHandler) GetConfig(args []interface{}) (replyBuf, error) {
return ret, err
}
return h.returnBytes(nil, serialized)

}

// GetBlob fetches a blob, performing digest verification.
@@ -626,7 +664,14 @@ func proxyCmd(global *globalOptions) *cobra.Command {
// processRequest dispatches a remote request.
// replyBuf is the result of the invocation.
// terminate should be true if processing of requests should halt.
func (h *proxyHandler) processRequest(req request) (rb replyBuf, terminate bool, err error) {
func (h *proxyHandler) processRequest(readBytes []byte) (rb replyBuf, terminate bool, err error) {
var req request

// Parse the request JSON
if err = json.Unmarshal(readBytes, &req); err != nil {
err = fmt.Errorf("invalid request: %v", err)
return
}
// Dispatch on the method
switch req.Method {
case "Initialize":
@@ -639,6 +684,8 @@ func (h *proxyHandler) processRequest(req request) (rb replyBuf, terminate bool,
rb, err = h.GetManifest(req.Args)
case "GetConfig":
rb, err = h.GetConfig(req.Args)
case "GetFullConfig":
rb, err = h.GetFullConfig(req.Args)
case "GetBlob":
rb, err = h.GetBlob(req.Args)
case "FinishPipe":
@@ -677,18 +724,15 @@ func (opts *proxyOptions) run(args []string, stdout io.Writer) error {
}
return fmt.Errorf("reading socket: %v", err)
}
// Parse the request JSON
readbuf := buf[0:n]
var req request
if err := json.Unmarshal(readbuf, &req); err != nil {
rb := replyBuf{}
rb.send(conn, fmt.Errorf("invalid request: %v", err))
}

rb, terminate, err := handler.processRequest(req)
rb, terminate, err := handler.processRequest(readbuf)
if terminate {
return nil
}
rb.send(conn, err)

if err := rb.send(conn, err); err != nil {
return fmt.Errorf("writing to socket: %w", err)
}
}
}
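The proxy hunks above bump the protocol to 0.2.3 and add a `GetFullConfig` method to the method switch. A minimal sketch of what a client request looks like on the wire, using only the `request` struct shown in the diff (the image ID value 1 is a hypothetical handle returned by an earlier open call):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// request mirrors the proxy's wire format from the hunk above.
type request struct {
	Method string        `json:"method"`
	Args   []interface{} `json:"args"`
}

func main() {
	// Ask the proxy for the full OCI image configuration of image ID 1;
	// GetFullConfig expects exactly one argument, [imgid].
	req := request{Method: "GetFullConfig", Args: []interface{}{1}}
	buf, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // {"method":"GetFullConfig","args":[1]}
}
```

The split of `GetFullConfig` from `GetConfig` preserves compatibility: older clients calling `GetConfig` keep getting the container runtime configuration they always received, while new clients can opt into the complete image configuration.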
@@ -5,14 +5,16 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"

"github.com/containers/image/v5/pkg/cli"
"github.com/containers/image/v5/signature"
"github.com/spf13/cobra"
)

type standaloneSignOptions struct {
output string // Output file path
output string // Output file path
passphraseFile string // Path pointing to a passphrase file when signing
}

func standaloneSignCmd() *cobra.Command {
@@ -25,6 +27,7 @@ func standaloneSignCmd() *cobra.Command {
adjustUsage(cmd)
flags := cmd.Flags()
flags.StringVarP(&opts.output, "output", "o", "", "output the signature to `SIGNATURE`")
flags.StringVarP(&opts.passphraseFile, "passphrase-file", "", "", "file that contains a passphrase for the --sign-by key")
return cmd
}

@@ -36,7 +39,7 @@ func (opts *standaloneSignOptions) run(args []string, stdout io.Writer) error {
dockerReference := args[1]
fingerprint := args[2]

manifest, err := ioutil.ReadFile(manifestPath)
manifest, err := os.ReadFile(manifestPath)
if err != nil {
return fmt.Errorf("Error reading %s: %v", manifestPath, err)
}
@@ -46,12 +49,18 @@ func (opts *standaloneSignOptions) run(args []string, stdout io.Writer) error {
return fmt.Errorf("Error initializing GPG: %v", err)
}
defer mech.Close()
signature, err := signature.SignDockerManifest(manifest, dockerReference, mech, fingerprint)

passphrase, err := cli.ReadPassphraseFile(opts.passphraseFile)
if err != nil {
return err
}

signature, err := signature.SignDockerManifestWithOptions(manifest, dockerReference, mech, fingerprint, &signature.SignOptions{Passphrase: passphrase})
if err != nil {
return fmt.Errorf("Error creating signature: %v", err)
}

if err := ioutil.WriteFile(opts.output, signature, 0644); err != nil {
if err := os.WriteFile(opts.output, signature, 0644); err != nil {
return fmt.Errorf("Error writing signature to %s: %v", opts.output, err)
}
return nil
@@ -80,11 +89,11 @@ func (opts *standaloneVerifyOptions) run(args []string, stdout io.Writer) error
expectedFingerprint := args[2]
signaturePath := args[3]

unverifiedManifest, err := ioutil.ReadFile(manifestPath)
unverifiedManifest, err := os.ReadFile(manifestPath)
if err != nil {
return fmt.Errorf("Error reading manifest from %s: %v", manifestPath, err)
}
unverifiedSignature, err := ioutil.ReadFile(signaturePath)
unverifiedSignature, err := os.ReadFile(signaturePath)
if err != nil {
return fmt.Errorf("Error reading signature from %s: %v", signaturePath, err)
}
@@ -130,7 +139,7 @@ func (opts *untrustedSignatureDumpOptions) run(args []string, stdout io.Writer)
}
untrustedSignaturePath := args[0]

untrustedSignature, err := ioutil.ReadFile(untrustedSignaturePath)
untrustedSignature, err := os.ReadFile(untrustedSignaturePath)
if err != nil {
return fmt.Errorf("Error reading untrusted signature from %s: %v", untrustedSignaturePath, err)
}
@@ -2,7 +2,6 @@ package main

import (
"encoding/json"
"io/ioutil"
"os"
"testing"
"time"
@@ -25,9 +24,8 @@ const (
// Test that results of runSkopeo failed with nothing on stdout, and substring
// within the error message.
func assertTestFailed(t *testing.T, stdout string, err error, substring string) {
assert.Error(t, err)
assert.ErrorContains(t, err, substring)
assert.Empty(t, stdout)
assert.Contains(t, err.Error(), substring)
}

func TestStandaloneSign(t *testing.T) {
@@ -40,8 +38,7 @@ func TestStandaloneSign(t *testing.T) {

manifestPath := "fixtures/image.manifest.json"
dockerReference := "testing/manifest"
os.Setenv("GNUPGHOME", "fixtures")
defer os.Unsetenv("GNUPGHOME")
t.Setenv("GNUPGHOME", "fixtures")

// Invalid command-line arguments
for _, args := range [][]string{
@@ -78,7 +75,7 @@ func TestStandaloneSign(t *testing.T) {
assertTestFailed(t, out, err, "/dev/full")

// Success
sigOutput, err := ioutil.TempFile("", "sig")
sigOutput, err := os.CreateTemp("", "sig")
require.NoError(t, err)
defer os.Remove(sigOutput.Name())
out, err = runSkopeo("standalone-sign", "-o", sigOutput.Name(),
@@ -86,9 +83,9 @@ func TestStandaloneSign(t *testing.T) {
require.NoError(t, err)
assert.Empty(t, out)

sig, err := ioutil.ReadFile(sigOutput.Name())
sig, err := os.ReadFile(sigOutput.Name())
require.NoError(t, err)
manifest, err := ioutil.ReadFile(manifestPath)
manifest, err := os.ReadFile(manifestPath)
require.NoError(t, err)
mech, err = signature.NewGPGSigningMechanism()
require.NoError(t, err)
@@ -103,8 +100,7 @@ func TestStandaloneVerify(t *testing.T) {
manifestPath := "fixtures/image.manifest.json"
signaturePath := "fixtures/image.signature"
dockerReference := "testing/manifest"
os.Setenv("GNUPGHOME", "fixtures")
defer os.Unsetenv("GNUPGHOME")
t.Setenv("GNUPGHOME", "fixtures")

// Invalid command-line arguments
for _, args := range [][]string{
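The test hunks above replace the manual set/unset dance with `t.Setenv`. A minimal sketch of the difference (the test name is illustrative):

```go
package main_test

import "testing"

func TestWithGNUPGHome(t *testing.T) {
	// t.Setenv (Go 1.17) restores the previous value automatically when
	// the test ends, replacing the manual pair the old code needed:
	//   os.Setenv("GNUPGHOME", "fixtures")
	//   defer os.Unsetenv("GNUPGHOME")
	// It also fails the test if it has been marked t.Parallel(), since
	// process-wide environment changes are unsafe in parallel tests.
	t.Setenv("GNUPGHOME", "fixtures")
}
```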
@@ -2,9 +2,10 @@ package main

import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"io/fs"
"os"
"path"
"path/filepath"
@@ -17,10 +18,10 @@ import (
"github.com/containers/image/v5/directory"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/pkg/cli"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
@@ -28,19 +29,23 @@ import (

// syncOptions contains information retrieved from the skopeo sync command line.
type syncOptions struct {
global *globalOptions // Global (not command dependent) skopeo options
deprecatedTLSVerify *deprecatedTLSVerifyOption
srcImage *imageOptions // Source image options
destImage *imageDestOptions // Destination image options
retryOpts *retry.RetryOptions
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
format commonFlag.OptionalString // Force conversion of the image to a specified format
source string // Source repository name
destination string // Destination registry name
scoped bool // When true, namespace copied images at destination using the source repository name
all bool // Copy all of the images if an image in the source is a list
keepGoing bool // Whether or not to abort the sync if there are any errors during syncing the images
global *globalOptions // Global (not command dependent) skopeo options
deprecatedTLSVerify *deprecatedTLSVerifyOption
srcImage *imageOptions // Source image options
destImage *imageDestOptions // Destination image options
retryOpts *retry.Options
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
signBySigstorePrivateKey string // Sign the image using a sigstore private key
signPassphraseFile string // Path pointing to a passphrase file when signing
format commonFlag.OptionalString // Force conversion of the image to a specified format
source string // Source repository name
destination string // Destination registry name
scoped bool // When true, namespace copied images at destination using the source repository name
all bool // Copy all of the images if an image in the source is a list
dryRun bool // Don't actually copy anything, just output what it would have done
preserveDigests bool // Preserve digests during sync
keepGoing bool // Whether or not to abort the sync if there are any errors during syncing the images
}

// repoDescriptor contains information of a single repository used as a sync source.
@@ -101,11 +106,15 @@ See skopeo-sync(1) for details.
flags := cmd.Flags()
flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE images")
flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
flags.StringVar(&opts.signBySigstorePrivateKey, "sign-by-sigstore-private-key", "", "Sign the image using a sigstore private key at `PATH`")
flags.StringVar(&opts.signPassphraseFile, "sign-passphrase-file", "", "File that contains a passphrase for the --sign-by key")
flags.VarP(commonFlag.NewOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use when syncing image(s) to a destination (default is manifest type of source, with fallbacks)`)
flags.StringVarP(&opts.source, "src", "s", "", "SOURCE transport type")
flags.StringVarP(&opts.destination, "dest", "d", "", "DESTINATION transport type")
flags.BoolVar(&opts.scoped, "scoped", false, "Images at DESTINATION are prefix using the full source image path as scope")
flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
flags.BoolVar(&opts.dryRun, "dry-run", false, "Run without actually copying data")
flags.BoolVar(&opts.preserveDigests, "preserve-digests", false, "Preserve digests of images and lists")
flags.BoolVarP(&opts.keepGoing, "keep-going", "", false, "Do not abort the sync if any image copy fails")
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&deprecatedTLSVerifyFlags)
@@ -133,13 +142,13 @@ func (tls *tlsVerifyConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
// It returns a new unmarshaled sourceConfig object and any error encountered.
func newSourceConfig(yamlFile string) (sourceConfig, error) {
var cfg sourceConfig
source, err := ioutil.ReadFile(yamlFile)
source, err := os.ReadFile(yamlFile)
if err != nil {
return cfg, err
}
err = yaml.Unmarshal(source, &cfg)
if err != nil {
return cfg, errors.Wrapf(err, "Failed to unmarshal %q", yamlFile)
return cfg, fmt.Errorf("Failed to unmarshal %q: %w", yamlFile, err)
}
return cfg, nil
}
@@ -151,7 +160,7 @@ func parseRepositoryReference(input string) (reference.Named, error) {
return nil, err
}
if !reference.IsNameOnly(ref) {
return nil, errors.Errorf("input names a reference, not a repository")
return nil, errors.New("input names a reference, not a repository")
}
return ref, nil
}
@@ -169,24 +178,24 @@ func destinationReference(destination string, transport string) (types.ImageRefe
case directory.Transport.Name():
_, err := os.Stat(destination)
if err == nil {
return nil, errors.Errorf("Refusing to overwrite destination directory %q", destination)
return nil, fmt.Errorf("Refusing to overwrite destination directory %q", destination)
}
if !os.IsNotExist(err) {
return nil, errors.Wrap(err, "Destination directory could not be used")
return nil, fmt.Errorf("Destination directory could not be used: %w", err)
}
// the directory holding the image must be created here
if err = os.MkdirAll(destination, 0755); err != nil {
return nil, errors.Wrapf(err, "Error creating directory for image %s", destination)
return nil, fmt.Errorf("Error creating directory for image %s: %w", destination, err)
}
imageTransport = directory.Transport
default:
return nil, errors.Errorf("%q is not a valid destination transport", transport)
return nil, fmt.Errorf("%q is not a valid destination transport", transport)
}
logrus.Debugf("Destination for transport %q: %s", transport, destination)

destRef, err := imageTransport.ParseReference(destination)
if err != nil {
return nil, errors.Wrapf(err, "Cannot obtain a valid image reference for transport %q and reference %q", imageTransport.Name(), destination)
return nil, fmt.Errorf("Cannot obtain a valid image reference for transport %q and reference %q: %w", imageTransport.Name(), destination, err)
}

return destRef, nil
@@ -206,16 +215,16 @@ func getImageTags(ctx context.Context, sysCtx *types.SystemContext, repoRef refe
return nil, err // Should never happen for a reference with tag and no digest
}
tags, err := docker.GetRepositoryTags(ctx, sysCtx, dockerRef)

switch err := err.(type) {
case nil:
break
case docker.ErrUnauthorizedForCredentials:
// Some registries may decide to block the "list all tags" endpoint.
// Gracefully allow the sync to continue in this case.
logrus.Warnf("Registry disallows tag list retrieval: %s", err)
default:
return tags, errors.Wrapf(err, "Error determining repository tags for image %s", name)
if err != nil {
var unauthorizedForCredentials docker.ErrUnauthorizedForCredentials
if errors.As(err, &unauthorizedForCredentials) {
// Some registries may decide to block the "list all tags" endpoint.
// Gracefully allow the sync to continue in this case.
logrus.Warnf("Registry disallows tag list retrieval: %s", err)
tags = nil
} else {
return nil, fmt.Errorf("Error determining repository tags for image %s: %w", name, err)
}
}

return tags, nil
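The `getImageTags` hunk above matters beyond style: a type switch on `err.(type)` only inspects the outermost error, while `errors.As` walks the whole wrapped chain, so the unauthorized case is still recognized once errors arrive wrapped via `%w`. A reduced sketch of the pattern, with a hypothetical error type standing in for `docker.ErrUnauthorizedForCredentials`:

```go
package main

import (
	"errors"
	"fmt"
)

// errUnauthorized is a hypothetical stand-in for
// docker.ErrUnauthorizedForCredentials.
type errUnauthorized struct{ msg string }

func (e errUnauthorized) Error() string { return e.msg }

func listTags() ([]string, error) {
	// The sentinel arrives wrapped, as fmt.Errorf("...: %w", err) produces.
	return nil, fmt.Errorf("fetching tags: %w", errUnauthorized{"401 Unauthorized"})
}

func main() {
	_, err := listTags()
	var unauthorized errUnauthorized
	// A type switch on err.(type) would miss this; errors.As unwraps.
	if errors.As(err, &unauthorized) {
		fmt.Println("registry disallows tag list retrieval, continuing:", unauthorized)
	} else if err != nil {
		fmt.Println("fatal:", err)
	}
}
```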
@@ -235,11 +244,15 @@ func imagesToCopyFromRepo(sys *types.SystemContext, repoRef reference.Named) ([]
for _, tag := range tags {
taggedRef, err := reference.WithTag(repoRef, tag)
if err != nil {
return nil, errors.Wrapf(err, "Error creating a reference for repository %s and tag %q", repoRef.Name(), tag)
logrus.WithFields(logrus.Fields{
"repo": repoRef.Name(),
"tag": tag,
}).Errorf("Error creating a tagged reference from registry tag list: %v", err)
continue
}
ref, err := docker.NewReference(taggedRef)
if err != nil {
return nil, errors.Wrapf(err, "Cannot obtain a valid image reference for transport %q and reference %s", docker.Transport.Name(), taggedRef.String())
return nil, fmt.Errorf("Cannot obtain a valid image reference for transport %q and reference %s: %w", docker.Transport.Name(), taggedRef.String(), err)
}
sourceReferences = append(sourceReferences, ref)
}
@@ -252,15 +265,15 @@ func imagesToCopyFromRepo(sys *types.SystemContext, repoRef reference.Named) ([]
// and any error encountered.
func imagesToCopyFromDir(dirPath string) ([]types.ImageReference, error) {
var sourceReferences []types.ImageReference
err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error {
err := filepath.WalkDir(dirPath, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if !info.IsDir() && info.Name() == "manifest.json" {
if !d.IsDir() && d.Name() == "manifest.json" {
dirname := filepath.Dir(path)
ref, err := directory.Transport.ParseReference(dirname)
if err != nil {
return errors.Wrapf(err, "Cannot obtain a valid image reference for transport %q and reference %q", directory.Transport.Name(), dirname)
return fmt.Errorf("Cannot obtain a valid image reference for transport %q and reference %q: %w", directory.Transport.Name(), dirname, err)
}
sourceReferences = append(sourceReferences, ref)
return filepath.SkipDir
@@ -270,7 +283,7 @@ func imagesToCopyFromDir(dirPath string) ([]types.ImageReference, error) {

if err != nil {
return sourceReferences,
errors.Wrapf(err, "Error walking the path %q", dirPath)
fmt.Errorf("Error walking the path %q: %w", dirPath, err)
}

return sourceReferences, nil
@@ -428,7 +441,7 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContex
}
named, err := reference.ParseNormalizedNamed(source) // May be a repository or an image.
if err != nil {
return nil, errors.Wrapf(err, "Cannot obtain a valid image reference for transport %q and reference %q", docker.Transport.Name(), source)
return nil, fmt.Errorf("Cannot obtain a valid image reference for transport %q and reference %q: %w", docker.Transport.Name(), source, err)
}
imageTagged := !reference.IsNameOnly(named)
logrus.WithFields(logrus.Fields{
@@ -438,7 +451,7 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContex
if imageTagged {
srcRef, err := docker.NewReference(named)
if err != nil {
return nil, errors.Wrapf(err, "Cannot obtain a valid image reference for transport %q and reference %q", docker.Transport.Name(), named.String())
return nil, fmt.Errorf("Cannot obtain a valid image reference for transport %q and reference %q: %w", docker.Transport.Name(), named.String(), err)
}
desc.ImageRefs = []types.ImageReference{srcRef}
} else {
@@ -447,7 +460,7 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContex
return descriptors, err
}
if len(desc.ImageRefs) == 0 {
return descriptors, errors.Errorf("No images to sync found in %q", source)
return descriptors, fmt.Errorf("No images to sync found in %q", source)
}
}
descriptors = append(descriptors, desc)
@@ -458,7 +471,7 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContex
}

if _, err := os.Stat(source); err != nil {
return descriptors, errors.Wrap(err, "Invalid source directory specified")
return descriptors, fmt.Errorf("Invalid source directory specified: %w", err)
}
desc.DirBasePath = source
var err error
@@ -467,7 +480,7 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContex
return descriptors, err
}
if len(desc.ImageRefs) == 0 {
return descriptors, errors.Errorf("No images to sync found in %q", source)
return descriptors, fmt.Errorf("No images to sync found in %q", source)
}
descriptors = append(descriptors, desc)

@@ -486,7 +499,7 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContex

descs, err := imagesToCopyFromRegistry(registryName, registryConfig, *sourceCtx)
if err != nil {
return descriptors, errors.Wrapf(err, "Failed to retrieve list of images from registry %q", registryName)
return descriptors, fmt.Errorf("Failed to retrieve list of images from registry %q: %w", registryName, err)
}
descriptors = append(descriptors, descs...)
}
@@ -495,7 +508,7 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContex
return descriptors, nil
}

func (opts *syncOptions) run(args []string, stdout io.Writer) error {
func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
if len(args) != 2 {
return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
}
@@ -503,9 +516,13 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {

policyContext, err := opts.global.getPolicyContext()
if err != nil {
return errors.Wrapf(err, "Error loading trust policy")
return fmt.Errorf("Error loading trust policy: %w", err)
}
defer policyContext.Destroy()
defer func() {
if err := policyContext.Destroy(); err != nil {
retErr = noteCloseFailure(retErr, "tearing down policy context", err)
}
}()

// validate source and destination options
contains := func(val string, list []string) (_ bool) {
@@ -521,20 +538,22 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
return errors.New("A source transport must be specified")
}
if !contains(opts.source, []string{docker.Transport.Name(), directory.Transport.Name(), "yaml"}) {
return errors.Errorf("%q is not a valid source transport", opts.source)
return fmt.Errorf("%q is not a valid source transport", opts.source)
}

if len(opts.destination) == 0 {
return errors.New("A destination transport must be specified")
}
if !contains(opts.destination, []string{docker.Transport.Name(), directory.Transport.Name()}) {
return errors.Errorf("%q is not a valid destination transport", opts.destination)
return fmt.Errorf("%q is not a valid destination transport", opts.destination)
}

if opts.source == opts.destination && opts.source == directory.Transport.Name() {
return errors.New("sync from 'dir' to 'dir' not implemented, consider using rsync instead")
}

opts.destImage.warnAboutIneffectiveOptions(transports.Get(opts.destination))

imageListSelection := copy.CopySystemImage
if opts.all {
imageListSelection = copy.CopyAllImages
@@ -558,7 +577,7 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {

sourceArg := args[0]
var srcRepoList []repoDescriptor
if err = retry.RetryIfNecessary(ctx, func() error {
if err = retry.IfNecessary(ctx, func() error {
srcRepoList, err = imagesToCopy(sourceArg, opts.source, sourceCtx)
return err
}, opts.retryOpts); err != nil {
@@ -571,17 +590,45 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
return err
}

// c/image/copy.Image does allow creating both simple signing and sigstore signatures simultaneously,
// with independent passphrases, but that would make the CLI probably too confusing.
// For now, use the passphrase with either, but only one of them.
if opts.signPassphraseFile != "" && opts.signByFingerprint != "" && opts.signBySigstorePrivateKey != "" {
return fmt.Errorf("Only one of --sign-by and sign-by-sigstore-private-key can be used with sign-passphrase-file")
}
var passphrase string
if opts.signPassphraseFile != "" {
p, err := cli.ReadPassphraseFile(opts.signPassphraseFile)
if err != nil {
return err
}
passphrase = p
} else if opts.signBySigstorePrivateKey != "" {
p, err := promptForPassphrase(opts.signBySigstorePrivateKey, os.Stdin, os.Stdout)
if err != nil {
return err
}
passphrase = p
}
options := copy.Options{
RemoveSignatures: opts.removeSignatures,
SignBy: opts.signByFingerprint,
SignPassphrase: passphrase,
SignBySigstorePrivateKeyFile: opts.signBySigstorePrivateKey,
SignSigstorePrivateKeyPassphrase: []byte(passphrase),
ReportWriter: os.Stdout,
DestinationCtx: destinationCtx,
ImageListSelection: imageListSelection,
PreserveDigests: opts.preserveDigests,
OptimizeDestinationImageAlreadyExists: true,
ForceManifestMIMEType: manifestType,
}
errorsPresent := false
imagesNumber := 0
if opts.dryRun {
logrus.Warn("Running in dry-run mode")
}

for _, srcRepo := range srcRepoList {
options.SourceCtx = srcRepo.Context
for counter, ref := range srcRepo.ImageRefs {
@@ -608,29 +655,37 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
return err
}

logrus.WithFields(logrus.Fields{
fromToFields := logrus.Fields{
"from": transports.ImageName(ref),
"to": transports.ImageName(destRef),
}).Infof("Copying image ref %d/%d", counter+1, len(srcRepo.ImageRefs))

if err = retry.RetryIfNecessary(ctx, func() error {
_, err = copy.Image(ctx, policyContext, destRef, ref, &options)
return err
}, opts.retryOpts); err != nil {
if !opts.keepGoing {
return errors.Wrapf(err, "Error copying ref %q", transports.ImageName(ref))
}
if opts.dryRun {
logrus.WithFields(fromToFields).Infof("Would have copied image ref %d/%d", counter+1, len(srcRepo.ImageRefs))
} else {
logrus.WithFields(fromToFields).Infof("Copying image ref %d/%d", counter+1, len(srcRepo.ImageRefs))
if err = retry.IfNecessary(ctx, func() error {
_, err = copy.Image(ctx, policyContext, destRef, ref, &options)
return err
}, opts.retryOpts); err != nil {
if !opts.keepGoing {
return fmt.Errorf("Error copying ref %q: %w", transports.ImageName(ref), err)
}
// log the error, keep a note that there was a failure and move on to the next
// image ref
errorsPresent = true
logrus.WithError(err).Errorf("Error copying ref %q", transports.ImageName(ref))
continue
}
// log the error, keep a note that there was a failure and move on to the next
// image ref
errorsPresent = true
logrus.WithError(err).Errorf("Error copying ref %q", transports.ImageName(ref))
continue
}
imagesNumber++
}
}

logrus.Infof("Synced %d images from %d sources", imagesNumber, len(srcRepoList))
if opts.dryRun {
logrus.Infof("Would have synced %d images from %d sources", imagesNumber, len(srcRepoList))
} else {
logrus.Infof("Synced %d images from %d sources", imagesNumber, len(srcRepoList))
}
if !errorsPresent {
return nil
}
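The `imagesToCopyFromDir` hunk above migrates from `filepath.Walk` to `filepath.WalkDir` (Go 1.16), whose `fs.DirEntry` callback avoids the per-entry `os.Lstat` that the old `os.FileInfo` callback paid for. A minimal sketch of the migrated walk; "images" is a hypothetical directory of dir:-transport images:

```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	err := filepath.WalkDir("images", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		// fs.DirEntry exposes Name/IsDir without stat-ing every entry.
		if !d.IsDir() && d.Name() == "manifest.json" {
			fmt.Println("found image at", filepath.Dir(path))
			// Skip the rest of this image's directory, as the hunk does.
			return filepath.SkipDir
		}
		return nil
	})
	if err != nil {
		fmt.Println("Error walking the path:", err)
	}
}
```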
@@ -1,9 +1,10 @@
package main

import (
"fmt"

"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/storage/pkg/unshare"
"github.com/pkg/errors"
"github.com/syndtr/gocapability/capability"
)

@@ -22,7 +23,7 @@ func maybeReexec() error {
// if we already have the capabilities we need.
capabilities, err := capability.NewPid(0)
if err != nil {
return errors.Wrapf(err, "error reading the current capabilities sets")
return fmt.Errorf("error reading the current capabilities sets: %w", err)
}
for _, cap := range neededCapabilities {
if !capabilities.Get(capability.EFFECTIVE, cap) {
@@ -2,6 +2,7 @@ package main

import (
"context"
"errors"
"fmt"
"io"
"os"
@@ -9,15 +10,16 @@ import (

commonFlag "github.com/containers/common/pkg/flag"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/directory"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"golang.org/x/term"
)

// errorShouldDisplayUsage is a subtype of error used by command handlers to indicate that cli.ShowSubcommandHelp should be called.
@@ -25,6 +27,27 @@ type errorShouldDisplayUsage struct {
error
}

// noteCloseFailure returns (possibly-nil) err modified to account for (non-nil) closeErr.
// The error for closeErr is annotated with description (which is not a format string)
// Typical usage:
//
//	defer func() {
//		if err := something.Close(); err != nil {
//			returnedErr = noteCloseFailure(returnedErr, "closing something", err)
//		}
//	}()
func noteCloseFailure(err error, description string, closeErr error) error {
// We don’t accept a Closer() and close it ourselves because signature.PolicyContext has .Destroy(), not .Close().
// This also makes it harder for a caller to do
// defer noteCloseFailure(returnedErr, …)
// which doesn’t use the right value of returnedErr, and doesn’t update it.
if err == nil {
return fmt.Errorf("%s: %w", description, closeErr)
}
// In this case we prioritize the primary error for use with %w; closeErr is usually less relevant, or might be a consequence of the primary error.
return fmt.Errorf("%w (%s: %v)", err, description, closeErr)
}
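A runnable illustration of the two message shapes `noteCloseFailure` produces, reproducing the function from the hunk above so the sketch is self-contained:

```go
package main

import (
	"errors"
	"fmt"
)

// noteCloseFailure as defined in the hunk above.
func noteCloseFailure(err error, description string, closeErr error) error {
	if err == nil {
		return fmt.Errorf("%s: %w", description, closeErr)
	}
	return fmt.Errorf("%w (%s: %v)", err, description, closeErr)
}

func main() {
	closeErr := errors.New("EBADF")
	// Main operation succeeded, only the close failed:
	fmt.Println(noteCloseFailure(nil, "closing image", closeErr))
	// -> closing image: EBADF
	// Both failed; the primary error stays first and stays unwrappable via %w:
	fmt.Println(noteCloseFailure(errors.New("copy failed"), "closing image", closeErr))
	// -> copy failed (closing image: EBADF)
}
```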
// commandAction intermediates between the RunE interface and the real handler,
// primarily to ensure that cobra.Command is not available to the handler, which in turn
// makes sure that the cmd.Flags() etc. flag access functions are not used,
@@ -33,8 +56,9 @@ type errorShouldDisplayUsage struct {
func commandAction(handler func(args []string, stdout io.Writer) error) func(cmd *cobra.Command, args []string) error {
return func(c *cobra.Command, args []string) error {
err := handler(args, c.OutOrStdout())
if _, ok := err.(errorShouldDisplayUsage); ok {
c.Help()
var shouldDisplayUsage errorShouldDisplayUsage
if errors.As(err, &shouldDisplayUsage) {
return c.Help()
}
return err
}
@@ -151,8 +175,8 @@ func imageFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLS
return fs, opts
}

func retryFlags() (pflag.FlagSet, *retry.RetryOptions) {
opts := retry.RetryOptions{}
func retryFlags() (pflag.FlagSet, *retry.Options) {
opts := retry.Options{}
fs := pflag.FlagSet{}
fs.IntVar(&opts.MaxRetry, "retry-times", 0, "the number of times to possibly retry")
return fs, &opts
@@ -221,6 +245,7 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
}

// imageDestOptions is a superset of imageOptions specialized for image destinations.
// Every user should call imageDestOptions.warnAboutIneffectiveOptions() as part of handling the CLI
type imageDestOptions struct {
*imageOptions
dirForceCompression bool // Compress layers when saving to the dir: transport
@@ -229,12 +254,13 @@ type imageDestOptions struct {
compressionFormat string // Format to use for the compression
compressionLevel commonFlag.OptionalInt // Level to use for the compression
precomputeDigests bool // Precompute digests to dedup layers when saving to the docker: transport
imageDestFlagPrefix string
}

// imageDestFlags prepares a collection of CLI flags writing into imageDestOptions, and the managed imageDestOptions structure.
func imageDestFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLSVerify *deprecatedTLSVerifyOption, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageDestOptions) {
genericFlags, genericOptions := imageFlags(global, shared, deprecatedTLSVerify, flagPrefix, credsOptionAlias)
opts := imageDestOptions{imageOptions: genericOptions}
opts := imageDestOptions{imageOptions: genericOptions, imageDestFlagPrefix: flagPrefix}
fs := pflag.FlagSet{}
fs.AddFlagSet(&genericFlags)
fs.BoolVar(&opts.dirForceCompression, flagPrefix+"compress", false, "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)")
@@ -272,6 +298,19 @@ func (opts *imageDestOptions) newSystemContext() (*types.SystemContext, error) {
return ctx, err
}

// warnAboutIneffectiveOptions warns if any ineffective option was set by the user
// Every user should call this as part of handling the CLI
func (opts *imageDestOptions) warnAboutIneffectiveOptions(destTransport types.ImageTransport) {
if destTransport.Name() != directory.Transport.Name() {
if opts.dirForceCompression {
logrus.Warnf("--%s can only be used if the destination transport is 'dir'", opts.imageDestFlagPrefix+"compress")
}
if opts.dirForceDecompression {
logrus.Warnf("--%s can only be used if the destination transport is 'dir'", opts.imageDestFlagPrefix+"decompress")
}
}
}

func parseCreds(creds string) (string, string, error) {
if creds == "" {
return "", "", errors.New("credentials can't be empty")
@@ -354,3 +393,19 @@ func adjustUsage(c *cobra.Command) {
c.SetUsageTemplate(usageTemplate)
c.DisableFlagsInUseLine = true
}

// promptForPassphrase interactively prompts for a passphrase related to privateKeyFile
func promptForPassphrase(privateKeyFile string, stdin, stdout *os.File) (string, error) {
stdinFd := int(stdin.Fd())
if !term.IsTerminal(stdinFd) {
return "", fmt.Errorf("Cannot prompt for a passphrase for key %s, standard input is not a TTY", privateKeyFile)
}

fmt.Fprintf(stdout, "Passphrase for key %s: ", privateKeyFile)
passphrase, err := term.ReadPassword(stdinFd)
if err != nil {
return "", fmt.Errorf("Error reading password: %w", err)
}
fmt.Fprintf(stdout, "\n")
return string(passphrase), nil
}
@@ -1,7 +1,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/containers/image/v5/manifest"
|
||||
@@ -13,6 +13,27 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNoteCloseFailure(t *testing.T) {
|
||||
const description = "description"
|
||||
|
||||
mainErr := errors.New("main")
|
||||
closeErr := errors.New("closing")
|
||||
|
||||
// Main success, closing failed
|
||||
res := noteCloseFailure(nil, description, closeErr)
|
||||
require.NotNil(t, res)
|
||||
assert.Contains(t, res.Error(), description)
|
||||
assert.Contains(t, res.Error(), closeErr.Error())
|
||||
|
||||
// Both main and closing failed
|
||||
res = noteCloseFailure(mainErr, description, closeErr)
|
||||
require.NotNil(t, res)
|
||||
assert.Contains(t, res.Error(), mainErr.Error())
|
||||
assert.Contains(t, res.Error(), description)
|
||||
assert.Contains(t, res.Error(), closeErr.Error())
|
||||
assert.ErrorIs(t, res, mainErr)
|
||||
}
|
||||
|
||||
// fakeGlobalOptions creates globalOptions and sets it according to flags.
|
||||
func fakeGlobalOptions(t *testing.T, flags []string) (*globalOptions, *cobra.Command) {
|
||||
app, opts := createApp()
|
||||
@@ -128,17 +149,9 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
}, res)
|
||||
|
||||
oldXRD, hasXRD := os.LookupEnv("REGISTRY_AUTH_FILE")
|
||||
defer func() {
|
||||
if hasXRD {
|
||||
os.Setenv("REGISTRY_AUTH_FILE", oldXRD)
|
||||
} else {
|
||||
os.Unsetenv("REGISTRY_AUTH_FILE")
|
||||
}
|
||||
}()
|
||||
authFile := "/tmp/auth.json"
|
||||
// Make sure when REGISTRY_AUTH_FILE is set the auth file is used
|
||||
os.Setenv("REGISTRY_AUTH_FILE", authFile)
|
||||
t.Setenv("REGISTRY_AUTH_FILE", authFile)
|
||||
|
||||
// Explicitly set everything to default, except for when the default is “not present”
|
||||
opts = fakeImageDestOptions(t, "dest-", true, []string{}, []string{
|
||||
|
||||
@@ -1,335 +0,0 @@
#! /bin/bash

_complete_() {
    local options_with_args=$1
    local boolean_options="$2 -h --help"
    local transports=$3

    local option_with_args
    for option_with_args in $options_with_args $transports
    do
        if [ "$option_with_args" == "$prev" ] || [ "$option_with_args" == "$cur" ]
        then
            return
        fi
    done

    case "$cur" in
        -*)
            while IFS='' read -r line; do COMPREPLY+=("$line"); done < <(compgen -W "$boolean_options $options_with_args" -- "$cur")
            ;;
        *)
            if [ -n "$transports" ]
            then
                compopt -o nospace
                while IFS='' read -r line; do COMPREPLY+=("$line"); done < <(compgen -W "$transports" -- "$cur")
            fi
            ;;
    esac
}

_skopeo_supported_transports() {
    local subcommand=$1

    skopeo "$subcommand" --help | grep "Supported transports" -A 1 | tail -n 1 | sed -e 's/,/:/g' -e 's/$/:/'
}

_skopeo_copy() {
    local options_with_args="
    --authfile
    --src-authfile
    --dest-authfile
    --format -f
    --sign-by
    --src-creds --screds
    --src-cert-dir
    --src-tls-verify
    --dest-creds --dcreds
    --dest-cert-dir
    --dest-tls-verify
    --src-daemon-host
    --dest-daemon-host
    --src-registry-token
    --dest-registry-token
    --src-username
    --src-password
    --dest-username
    --dest-password
    "

    local boolean_options="
    --all
    --dest-compress
    --dest-decompress
    --remove-signatures
    --src-no-creds
    --dest-no-creds
    --dest-oci-accept-uncompressed-layers
    --dest-precompute-digests
    "

    local transports
    transports="
    $(_skopeo_supported_transports "${FUNCNAME//"_skopeo_"/}")
    "

    _complete_ "$options_with_args" "$boolean_options" "$transports"
}

_skopeo_sync() {
    local options_with_args="
    --authfile
    --dest
    --dest-authfile
    --dest-cert-dir
    --dest-creds
    --dest-registry-token
    --format
    --retry-times
    --sign-by
    --src
    --src-authfile
    --src-cert-dir
    --src-creds
    --src-registry-token
    --src-username
    --src-password
    --dest-username
    --dest-password
    "

    local boolean_options="
    --all
    --dest-no-creds
    --dest-tls-verify
    --remove-signatures
    --scoped
    --src-no-creds
    --src-tls-verify
    --keep-going
    "

    local transports
    transports="
    $(_skopeo_supported_transports "${FUNCNAME//"_skopeo_"/}")
    "

    _complete_ "$options_with_args" "$boolean_options" "$transports"
}

_skopeo_inspect() {
    local options_with_args="
    --authfile
    --creds
    --cert-dir
    --format
    --retry-times
    --registry-token
    --username
    --password
    "
    local boolean_options="
    --config
    --raw
    --tls-verify
    --no-creds
    --no-tags -n
    "

    local transports
    transports="
    $(_skopeo_supported_transports "${FUNCNAME//"_skopeo_"/}")
    "

    _complete_ "$options_with_args" "$boolean_options" "$transports"
}

_skopeo_standalone_sign() {
    local options_with_args="
    -o --output
    "
    local boolean_options="
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_standalone_verify() {
    local options_with_args="
    "
    local boolean_options="
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_manifest_digest() {
    local options_with_args="
    "
    local boolean_options="
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_delete() {
    local options_with_args="
    --authfile
    --creds
    --cert-dir
    --registry-token
    --username
    --password
    "
    local boolean_options="
    --tls-verify
    --no-creds
    "

    local transports
    transports="
    $(_skopeo_supported_transports "${FUNCNAME//"_skopeo_"/}")
    "

    _complete_ "$options_with_args" "$boolean_options" "$transports"
}

_skopeo_layers() {
    local options_with_args="
    --authfile
    --creds
    --cert-dir
    --registry-token
    --username
    --password
    "
    local boolean_options="
    --tls-verify
    --no-creds
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_list_repository_tags() {
    local options_with_args="
    --authfile
    --creds
    --cert-dir
    --registry-token
    --username
    --password
    "

    local boolean_options="
    --tls-verify
    --no-creds
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_login() {
    local options_with_args="
    --authfile
    --cert-dir
    --password -p
    --username -u
    "

    local boolean_options="
    --get-login
    --tls-verify
    --password-stdin
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_logout() {
    local options_with_args="
    --authfile
    "

    local boolean_options="
    --all -a
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_skopeo() {
    # XXX: Changes here need to be reflected in the manually expanded
    # string in the `case` statement below as well.
    local options_with_args="
    --policy
    --registries.d
    --override-arch
    --override-os
    --override-variant
    --command-timeout
    --tmpdir
    "
    local boolean_options="
    --insecure-policy
    --debug
    --version -v
    --help -h
    "

    local commands=(
    copy
    delete
    inspect
    list-tags
    login
    logout
    manifest-digest
    standalone-sign
    standalone-verify
    sync
    help
    h
    )

    case "$prev" in
        # XXX: Changes here need to be reflected in $options_with_args as well.
        --policy|--registries.d|--override-arch|--override-os|--override-variant|--command-timeout)
            return
            ;;
    esac

    case "$cur" in
        -*)
            while IFS='' read -r line; do COMPREPLY+=("$line"); done < <(compgen -W "$boolean_options $options_with_args" -- "$cur")
            ;;
        *)
            while IFS='' read -r line; do COMPREPLY+=("$line"); done < <(compgen -W "${commands[*]} help" -- "$cur")
            ;;
    esac
}

_cli_bash_autocomplete() {
    local cur prev words cword

    COMPREPLY=()
    _get_comp_words_by_ref -n : cur prev words cword

    local command="skopeo" cpos=0
    local counter=1
    while [ $counter -lt "$cword" ]; do
        case "${words[$counter]}" in
            skopeo|copy|sync|inspect|delete|manifest-digest|standalone-sign|standalone-verify|help|h|list-repository-tags)
                command="${words[$counter]//-/_}"
                cpos=$counter
                (( cpos++ ))
                break
                ;;
        esac
        (( counter++ ))
    done

    local completions_func=_skopeo_${command}
    declare -F "$completions_func" >/dev/null && $completions_func

    return 0
}

complete -F _cli_bash_autocomplete skopeo
@@ -30,12 +30,6 @@ else
    ) > /dev/stderr
fi

OS_RELEASE_ID="$(source /etc/os-release; echo $ID)"
# GCE image-name compatible string representation of distribution _major_ version
OS_RELEASE_VER="$(source /etc/os-release; echo $VERSION_ID | tr -d '.')"
# Combined to ease some usage
OS_REL_VER="${OS_RELEASE_ID}-${OS_RELEASE_VER}"

# This is the magic interpreted by the tests to allow modifying local config/services.
SKOPEO_CONTAINER_TESTS=1

@@ -61,6 +55,13 @@ _run_setup() {
    # VMs come with the distro skopeo package pre-installed
    dnf erase -y skopeo

    # Required for testing the SIF transport
    dnf install -y fakeroot squashfs-tools

    msg "Removing systemd-resolved from nsswitch.conf"
    # /etc/resolv.conf is already set to bypass systemd-resolved
    sed -i -r -e 's/^(hosts.+)resolve.+dns/\1dns/' /etc/nsswitch.conf

    # A slew of compiled binaries are pre-built and distributed
    # within the CI/Dev container image, but we want to run
    # things directly on the host VM. Fortunately they're all
@@ -1,3 +1,16 @@
+[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***)
+[comment]: <> ()
+[comment]: <> (ANY changes made to this file, once committed/merged must)
+[comment]: <> (be manually copy/pasted -in markdown- into the description)
+[comment]: <> (field on Quay at the following locations:)
+[comment]: <> ()
+[comment]: <> (https://quay.io/repository/containers/skopeo)
+[comment]: <> (https://quay.io/repository/skopeo/stable)
+[comment]: <> (https://quay.io/repository/skopeo/testing)
+[comment]: <> (https://quay.io/repository/skopeo/upstream)
+[comment]: <> ()
+[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***)

<img src="https://cdn.rawgit.com/containers/skopeo/master/docs/skopeo.svg" width="250">

----
@@ -6,7 +19,7 @@

## Overview

-This directory contains the Dockerfiles necessary to create the skopeoimage container
+This directory contains the Containerfiles necessary to create the skopeoimage container
images that are housed on quay.io under the skopeo account. All repositories where
the images live are public and can be pulled without credentials. These container images are secured and the
resulting containers can run safely with privileges within the container.
@@ -18,22 +31,23 @@ default to `/`.

The container images are:

-* `quay.io/containers/skopeo:<version>` and `quay.io/skopeo/stable:<version>` -
-  These images are built when a new Skopeo version becomes available in
-  Fedora. These images are intended to be unchanging and stable, they will
-  never be updated by automation once they've been pushed. For build details,
-  please [see the configuration file](stable/Dockerfile).
+* `quay.io/containers/skopeo:v<version>` and `quay.io/skopeo/stable:v<version>` -
+  These images are built daily. These images are intended to contain an unchanging
+  and stable version of skopeo. For the most recent `<version>` tags (`vX`,
+  `vX.Y`, and `vX.Y.Z`) the image contents will be updated daily to incorporate
+  (especially) security updates. For build details, please [see the configuration
+  file](stable/Containerfile).
* `quay.io/containers/skopeo:latest` and `quay.io/skopeo/stable:latest` -
-  Built daily using the same Dockerfile as above. The skopeo version
-  will remain the "latest" available in Fedora, however the image
+  Built daily using the same Containerfile as above. The skopeo version
+  will remain the "latest" available in Fedora, however the other image
  contents may vary compared to the version-tagged images.
* `quay.io/skopeo/testing:latest` - This image is built daily, using the
  latest version of Skopeo that was in the Fedora `updates-testing` repository.
-  The image is built with [the testing Dockerfile](testing/Dockerfile).
+  The image is built with [the testing Containerfile](testing/Containerfile).
* `quay.io/skopeo/upstream:latest` - This image is built daily using the latest
  code found in this GitHub repository. Due to the image changing frequently,
  it's not guaranteed to be stable or even executable. The image is built with
-  [the upstream Dockerfile](upstream/Dockerfile).
+  [the upstream Containerfile](upstream/Containerfile).

## Sample Usage
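As an illustrative sketch (not verbatim from the README shown above), the stable image runs skopeo directly thanks to its entrypoint, so an inspect against a public registry works out of the box:

```sh
# Image reference is a placeholder; any public image will do.
podman run --rm quay.io/skopeo/stable:latest \
    inspect docker://registry.fedoraproject.org/fedora:latest
```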
contrib/skopeoimage/stable/Containerfile (new file, 47 lines)
@@ -0,0 +1,47 @@
# stable/Containerfile
#
# Build a Skopeo container image from the latest
# stable version of Skopeo on the Fedora Updates System.
# https://bodhi.fedoraproject.org/updates/?search=skopeo
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:latest

# Don't include container-selinux and remove
# directories used by dnf that are just taking
# up space.
# TODO: rpm --setcaps... needed due to Fedora (base) image builds
#       being (maybe still?) affected by
#       https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
RUN dnf -y update && \
    rpm --setcaps shadow-utils 2>/dev/null && \
    dnf -y install skopeo fuse-overlayfs \
        --exclude container-selinux && \
    dnf clean all && \
    rm -rf /var/cache /var/log/dnf* /var/log/yum.*

RUN useradd skopeo && \
    echo skopeo:100000:65536 > /etc/subuid && \
    echo skopeo:100000:65536 > /etc/subgid

# Copy & modify the defaults to provide reference if runtime changes needed.
# Changes here are required for running with fuse-overlay storage inside container.
RUN sed -e 's|^#mount_program|mount_program|g' \
        -e '/additionalimage.*/a "/var/lib/shared",' \
        -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \
        /usr/share/containers/storage.conf \
        > /etc/containers/storage.conf

# Setup the ability to use additional stores
# with this container image.
RUN mkdir -p /var/lib/shared/overlay-images \
        /var/lib/shared/overlay-layers && \
    touch /var/lib/shared/overlay-images/images.lock && \
    touch /var/lib/shared/overlay-layers/layers.lock

# Point to the Authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json

# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]
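To reproduce the stable image locally, a plausible build command from the repository root would be (the tag name is arbitrary, not prescribed by the file above):

```sh
podman build -t skopeoimage:stable \
    -f contrib/skopeoimage/stable/Containerfile .
```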
@@ -1,33 +0,0 @@
# stable/Dockerfile
#
# Build a Skopeo container image from the latest
# stable version of Skopeo on the Fedora Updates System.
# https://bodhi.fedoraproject.org/updates/?search=skopeo
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:latest

# Don't include container-selinux and remove
# directories used by yum that are just taking
# up space. Also reinstall shadow-utils as without
# doing so, the setuid/setgid bits on newuidmap
# and newgidmap are lost in the Fedora images.
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; yum -y install skopeo fuse-overlayfs --exclude container-selinux; yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*

# Adjust storage.conf to enable Fuse storage.
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf

# Setup the ability to use additional stores
# with this container image.
RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers; touch /var/lib/shared/overlay-images/images.lock; touch /var/lib/shared/overlay-layers/layers.lock

# Setup skopeo's uid/gid entries
RUN echo skopeo:100000:65536 > /etc/subuid
RUN echo skopeo:100000:65536 > /etc/subgid

# Point to the Authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json

# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]

contrib/skopeoimage/testing/Containerfile (new file, 49 lines)
@@ -0,0 +1,49 @@
# testing/Containerfile
#
# Build a Skopeo container image from the latest
# version of Skopeo that is in updates-testing
# on the Fedora Updates System.
# https://bodhi.fedoraproject.org/updates/?search=skopeo
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:latest

# Don't include container-selinux and remove
# directories used by dnf that are just taking
# up space.
# TODO: rpm --setcaps... needed due to Fedora (base) image builds
#       being (maybe still?) affected by
#       https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
RUN dnf -y update && \
    rpm --setcaps shadow-utils 2>/dev/null && \
    dnf -y install skopeo fuse-overlayfs \
        --exclude container-selinux \
        --enablerepo updates-testing && \
    dnf clean all && \
    rm -rf /var/cache /var/log/dnf* /var/log/yum.*

RUN useradd skopeo && \
    echo skopeo:100000:65536 > /etc/subuid && \
    echo skopeo:100000:65536 > /etc/subgid

# Copy & modify the defaults to provide reference if runtime changes needed.
# Changes here are required for running with fuse-overlay storage inside container.
RUN sed -e 's|^#mount_program|mount_program|g' \
        -e '/additionalimage.*/a "/var/lib/shared",' \
        -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \
        /usr/share/containers/storage.conf \
        > /etc/containers/storage.conf

# Setup the ability to use additional stores
# with this container image.
RUN mkdir -p /var/lib/shared/overlay-images \
        /var/lib/shared/overlay-layers && \
    touch /var/lib/shared/overlay-images/images.lock && \
    touch /var/lib/shared/overlay-layers/layers.lock

# Point to the Authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json

# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]
@@ -1,34 +0,0 @@
# testing/Dockerfile
#
# Build a Skopeo container image from the latest
# version of Skopeo that is in updates-testing
# on the Fedora Updates System.
# https://bodhi.fedoraproject.org/updates/?search=skopeo
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:latest

# Don't include container-selinux and remove
# directories used by yum that are just taking
# up space. Also reinstall shadow-utils as without
# doing so, the setuid/setgid bits on newuidmap
# and newgidmap are lost in the Fedora images.
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; yum -y install skopeo fuse-overlayfs --enablerepo updates-testing --exclude container-selinux; yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*

# Adjust storage.conf to enable Fuse storage.
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf

# Setup the ability to use additional stores
# with this container image.
RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers; touch /var/lib/shared/overlay-images/images.lock; touch /var/lib/shared/overlay-layers/layers.lock

# Setup skopeo's uid/gid entries
RUN echo skopeo:100000:65536 > /etc/subuid
RUN echo skopeo:100000:65536 > /etc/subgid

# Point to the Authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json

# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]

contrib/skopeoimage/upstream/Containerfile (new file, 50 lines)
@@ -0,0 +1,50 @@
# upstream/Containerfile
#
# Build a Skopeo container image from the latest
# upstream version of Skopeo on GitHub.
# https://github.com/containers/skopeo
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:latest

# Don't include container-selinux and remove
# directories used by dnf that are just taking
# up space.
# TODO: rpm --setcaps... needed due to Fedora (base) image builds
#       being (maybe still?) affected by
#       https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
RUN dnf -y update && \
    rpm --setcaps shadow-utils 2>/dev/null && \
    dnf -y install 'dnf-command(copr)' --enablerepo=updates-testing && \
    dnf -y copr enable rhcontainerbot/podman-next && \
    dnf -y install skopeo \
        --exclude container-selinux \
        --enablerepo=updates-testing && \
    dnf clean all && \
    rm -rf /var/cache /var/log/dnf* /var/log/yum.*

RUN useradd skopeo && \
    echo skopeo:100000:65536 > /etc/subuid && \
    echo skopeo:100000:65536 > /etc/subgid

# Copy & modify the defaults to provide reference if runtime changes needed.
# Changes here are required for running with fuse-overlay storage inside container.
RUN sed -e 's|^#mount_program|mount_program|g' \
        -e '/additionalimage.*/a "/var/lib/shared",' \
        -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \
        /usr/share/containers/storage.conf \
        > /etc/containers/storage.conf

# Setup the ability to use additional stores
# with this container image.
RUN mkdir -p /var/lib/shared/overlay-images \
        /var/lib/shared/overlay-layers && \
    touch /var/lib/shared/overlay-images/images.lock && \
    touch /var/lib/shared/overlay-layers/layers.lock

# Point to the Authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json

# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]
@@ -1,54 +0,0 @@
# upstream/Dockerfile
#
# Build a Skopeo container image from the latest
# upstream version of Skopeo on GitHub.
# https://github.com/containers/skopeo
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:latest

# Don't include container-selinux and remove
# directories used by yum that are just taking
# up space. Also reinstall shadow-utils as without
# doing so, the setuid/setgid bits on newuidmap
# and newgidmap are lost in the Fedora images.
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; \
    yum -y install make \
        golang \
        git \
        go-md2man \
        fuse-overlayfs \
        fuse3 \
        containers-common \
        gpgme-devel \
        libassuan-devel \
        btrfs-progs-devel \
        device-mapper-devel --enablerepo updates-testing --exclude container-selinux; \
    mkdir /root/skopeo; \
    git clone https://github.com/containers/skopeo /root/skopeo/src/github.com/containers/skopeo; \
    export GOPATH=/root/skopeo; \
    cd /root/skopeo/src/github.com/containers/skopeo; \
    make bin/skopeo; \
    make PREFIX=/usr install; \
    rm -rf /root/skopeo/*; \
    yum -y remove git golang go-md2man make; \
    yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*

# Adjust storage.conf to enable Fuse storage.
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf

# Setup the ability to use additional stores
# with this container image.
RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers; touch /var/lib/shared/overlay-images/images.lock; touch /var/lib/shared/overlay-layers/layers.lock

# Setup skopeo's uid/gid entries
RUN echo skopeo:100000:65536 > /etc/subuid
RUN echo skopeo:100000:65536 > /etc/subgid

# Point to the Authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json

# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]

default.yaml (20 lines)
@@ -1,19 +1,21 @@
# This is a default registries.d configuration file. You may
# add to this file or create additional files in registries.d/.
#
-# sigstore: indicates a location that is read and write
-# sigstore-staging: indicates a location that is only for write
+# lookaside: for reading/writing simple signing signatures
+# lookaside-staging: for writing simple signing signatures, preferred over lookaside
#
-# sigstore and sigstore-staging take a value of the following:
-# sigstore: {schema}://location
+# lookaside and lookaside-staging take a value of the following:
+# lookaside: {schema}://location
#
# For reading signatures, schema may be http, https, or file.
# For writing signatures, schema may only be file.

-# This is the default signature write location for docker registries.
+# The default locations are built-in, for both reading and writing:
+#   /var/lib/containers/sigstore for root, or
+#   ~/.local/share/containers/sigstore for non-root users.
default-docker:
-#   sigstore: file:///var/lib/containers/sigstore
-    sigstore-staging: file:///var/lib/containers/sigstore
+#   lookaside: https://…
+#   lookaside-staging: file:///…

# The 'docker' indicator here is the start of the configuration
# for docker registries.
@@ -21,6 +23,6 @@ default-docker:
# docker:
#
# privateregistry.com:
-# sigstore: http://privateregistry.com/sigstore/
-# sigstore-staging: /mnt/nfs/privateregistry/sigstore
+# lookaside: https://privateregistry.com/sigstore/
+# lookaside-staging: /mnt/nfs/privateregistry/sigstore
@@ -54,6 +54,10 @@ Directory to use to share blobs across OCI repositories.

After copying the image, write the digest of the resulting image to the file.

**--preserve-digests**

Preserve the digests during copying. Fail if the digest cannot be preserved. Consider using `--all` at the same time.

**--encrypt-layer** _ints_

*Experimental* the 0-indexed layer indices, with support for negative indexing (e.g. 0 is the first layer, -1 is the last layer)

@@ -66,6 +70,17 @@ MANIFEST TYPE (oci, v2s1, or v2s2) to use in the destination (default is manifest type of source, with fallbacks)

Print usage statement

**--multi-arch** _option_

Control what is copied if _source-image_ refers to a multi-architecture image. Default is system.

Options:
- system: Copy only the image that matches the system architecture
- all: Copy the full multi-architecture image
- index-only: Copy only the index

The index-only option usually fails unless the referenced per-architecture images are already present in the destination, or the target registry supports sparse indexes.
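For instance, to copy a complete multi-architecture image including its index (the image references are placeholders):

```sh
skopeo copy --multi-arch all \
    docker://registry.example.com/myimage:v1.0 \
    oci:/tmp/myimage:v1.0
```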
**--quiet**, **-q**

Suppress output information when copying images.

@@ -74,9 +89,21 @@ Suppress output information when copying images.

Do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.

-**--sign-by**=_key-id_
+**--sign-by** _key-id_

-Add a signature using that key ID for an image name corresponding to _destination-image_
+Add a “simple signing” signature using that key ID for an image name corresponding to _destination-image_

**--sign-by-sigstore-private-key** _path_

Add a sigstore signature using a private key at _path_ for an image name corresponding to _destination-image_

**--sign-passphrase-file** _path_

The passphrase to use when signing with `--sign-by` or `--sign-by-sigstore-private-key`. Only the first line will be read. A passphrase stored in a file is of questionable security if other users can read this file. Do not use this option if at all avoidable.

**--sign-identity** _reference_

The identity to use when signing the image. The identity must be a fully specified docker reference. If the identity is not specified, the target docker reference will be used.
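A sketch combining these signing options during a copy (key file, passphrase file, and image references are placeholders):

```sh
skopeo copy \
    --sign-by-sigstore-private-key ./mykey.private \
    --sign-passphrase-file ./passphrase.txt \
    docker://registry.example.com/myimage:v1.0 \
    docker://registry.example.com/myimage:v1.0-signed
```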
**--src-shared-blob-dir** _directory_

@@ -94,15 +121,15 @@ Key to be used for decryption of images. Key can point to keys and/or certificates.

Credentials for accessing the source registry.

-**--dest-compress** _bool-value_
+**--dest-compress**

Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source).

-**--dest-decompress** _bool-value_
+**--dest-decompress**

Decompress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source).

-**--dest-oci-accept-uncompressed-layers** _bool-value_
+**--dest-oci-accept-uncompressed-layers**

Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed).

@@ -114,11 +141,11 @@ Credentials for accessing the destination registry.

Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon.

-**--src-no-creds** _bool-value_
+**--src-no-creds**

Access the registry anonymously.

-**--src-tls-verify** _bool-value_
+**--src-tls-verify**=_bool_

Require HTTPS and verify certificates when talking to container source registry or daemon. Default to source registry setting.

@@ -126,11 +153,11 @@ Require HTTPS and verify certificates when talking to container source registry or daemon.

Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon.

-**--dest-no-creds** _bool-value_
+**--dest-no-creds**

Access the registry anonymously.

-**--dest-tls-verify** _bool-value_
+**--dest-tls-verify**=_bool_

Require HTTPS and verify certificates when talking to container destination registry or daemon. Default to destination registry setting.

@@ -160,7 +187,7 @@ Bearer token for accessing the source registry.

Bearer token for accessing the destination registry.

-**--dest-precompute-digests** _bool-value_
+**--dest-precompute-digests**

Precompute digests to ensure layers are not uploaded that already exist on the destination registry. Layers with initially unknown digests (ex. compressing "on the fly") will be temporarily streamed to disk.
@@ -6,17 +6,27 @@ skopeo\-delete - Mark the _image-name_ for later deletion by the registry's garbage collector.

## SYNOPSIS
**skopeo delete** [*options*] _image-name_

-Mark _image-name_ for deletion. To release the allocated disk space, you must login to the container registry server and execute the container registry garbage collector. E.g.,
+## DESCRIPTION
+
+Mark _image-name_ for deletion.
+The effect of this is registry-specific; many registries don’t support this operation, or don’t allow it in some circumstances / configurations.
+
+**WARNING**: If _image-name_ contains a digest, this affects the referenced manifest, and may delete all tags (within the current repository?) pointing to that manifest.
+
+**WARNING**: If _image-name_ contains a tag (but not a digest), in the current version of Skopeo this resolves the tag into a digest, and then deletes the manifest by digest, as described above (possibly deleting all tags pointing to that manifest, not just the provided tag). This behavior may change in the future.
+
+When using the github.com/distribution/distribution registry server:
+To release the allocated disk space, you must login to the container registry server and execute the container registry garbage collector. E.g.,

```
/usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
```

Note: sometimes the config.yml is stored in /etc/docker/registry/config.yml

If you are running the container registry inside of a container you would execute something like:

```
$ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
```

## OPTIONS
@@ -42,7 +52,7 @@ Use docker daemon host at _host_ (`docker-daemon:` transport only)

Print usage statement

-**--no-creds** _bool-value_
+**--no-creds**

Access the registry anonymously.

@@ -76,7 +86,7 @@ The password to access the registry.

Mark image example/pause for deletion from the registry.example.com registry:
```sh
-$ skopeo delete --force docker://registry.example.com/example/pause:latest
+$ skopeo delete docker://registry.example.com/example/pause:latest
```
See above for additional details on using the command **delete**.
@@ -8,9 +8,12 @@ skopeo\-inspect - Return low-level information about _image-name_ in a registry.

## DESCRIPTION

-Return low-level information about _image-name_ in a registry
+Return low-level information about _image-name_ in a registry.
+See [skopeo(1)](skopeo.1.md) for the format of _image-name_.

-_image-name_ name of image to retrieve information about
+The default output includes data from various sources: user input (**Name**), the remote repository, if any (**RepoTags**), the top-level manifest (**Digest**),
+and a per-architecture/OS image matching the current run-time environment (most other values).
+To see values for a different architecture/OS, use the **--override-os** / **--override-arch** options documented in [skopeo(1)](skopeo.1.md).

## OPTIONS

@@ -77,7 +80,7 @@ The username to access the registry.

The password to access the registry.

-**--no-tags**, **-n**=_bool_
+**--no-tags**, **-n**

Do not list the available tags from the repository in the output. When `true`, the `RepoTags` array will be empty. Defaults to `false`, which includes all available tags.
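For example, skipping the tag enumeration entirely (the image reference is a placeholder):

```sh
skopeo inspect --no-tags docker://registry.example.com/example/pause:latest
```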
@@ -1,14 +1,14 @@
% skopeo-list-tags(1)

## NAME
-skopeo\-list\-tags - List tags in the transport-specific image repository.
+skopeo\-list\-tags - List image names in a transport-specific collection of images.

## SYNOPSIS
-**skopeo list-tags** [*options*] _repository-name_
+**skopeo list-tags** [*options*] _source-image_

-Return a list of tags from _repository-name_ in a registry.
+Return a list of tags from _source-image_ in a registry or a local docker-archive file.

-_repository-name_ name of repository to retrieve tag listing from
+_source-image_ name of the repository to retrieve a tag listing from, or a local docker-archive file.

## OPTIONS

@@ -27,7 +27,7 @@ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry.

Print usage statement

-**--no-creds** _bool-value_
+**--no-creds**

Access the registry anonymously.

@@ -53,7 +53,7 @@ The password to access the registry.

## REPOSITORY NAMES

-Repository names are transport-specific references as each transport may have its own concept of a "repository" and "tags". Currently, only the Docker transport is supported.
+Repository names are transport-specific references as each transport may have its own concept of a "repository" and "tags".

This command refers to repositories using a _transport_`:`_details_ format. The following formats are supported:

@@ -72,6 +72,8 @@ This command refers to repositories using a _transport_`:`_details_ format. The following formats are supported:
"docker.io/myuser/myimage:v1.0"
"docker.io/myuser/myimage@sha256:f48c4cc192f4c3c6a069cb5cca6d0a9e34d6076ba7c214fd0cc3ca60e0af76bb"

**docker-archive:path[:docker-reference]**
Used when more than one image is stored in a docker save-formatted file.

## EXAMPLES

@@ -121,8 +123,48 @@ $ skopeo list-tags docker://localhost:5000/fedora

```

### Docker-archive Transport

To list the tags in a local docker-archive file:

```sh
$ skopeo list-tags docker-archive:/tmp/busybox.tar.gz
{
    "Tags": [
        "busybox:1.28.3"
    ]
}
```

An archive can also hold more than one tag:

```sh
$ skopeo list-tags docker-archive:/tmp/docker-two-images.tar.gz
{
    "Tags": [
        "example.com/empty:latest",
        "example.com/empty/but:different"
    ]
}
```

The listing will include a source-index entry for each untagged image:

```sh
$ skopeo list-tags docker-archive:/tmp/four-tags-with-an-untag.tar
{
    "Tags": [
        "image1:tag1",
        "image2:tag2",
        "@2",
        "image4:tag4"
    ]
}
```

# SEE ALSO
-skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5)
+skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5), containers-transports(1)

## AUTHORS
@@ -25,6 +25,10 @@ Print usage statement

Write signature to _output file_.

**--passphrase-file**=_path_

The passphrase to use when signing with the key ID from `--sign-by`. Only the first line will be read. A passphrase stored in a file is of questionable security if other users can read this file. Do not use this option if at all avoidable.

## EXAMPLES
@@ -1,14 +1,14 @@
% skopeo-sync(1)

## NAME
-skopeo\-sync - Synchronize images between container registries and local directories.
+skopeo\-sync - Synchronize images between registry repositories and local directories.

## SYNOPSIS
**skopeo sync** [*options*] --src _transport_ --dest _transport_ _source_ _destination_

## DESCRIPTION
-Synchronize images between container registries and local directories.
+Synchronize images between registry repositories and local directories.
The synchronization is achieved by copying all the images found at _source_ to _destination_.

Useful to synchronize a local container registry mirror, and to populate registries running inside of air-gapped environments.
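For instance, mirroring one repository from a registry to a local directory (the registry name and path are illustrative):

```sh
skopeo sync --src docker --dest dir registry.example.com/busybox /media/usb
```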
@@ -50,6 +50,10 @@ Path of the authentication file for the source registry. Uses path given by `--authfile`, if not provided.

Path of the authentication file for the destination registry. Uses path given by `--authfile`, if not provided.

**--dry-run**

Run the sync without actually copying data to the destination.

**--src**, **-s** _transport_ Transport for the source repository.

**--dest**, **-d** _transport_ Destination transport.
@@ -62,9 +66,21 @@ Print usage statement.

**--scoped** Prefix images with the source image path, so that multiple images with the same name can be stored at _destination_.

**--preserve-digests** Preserve the digests during copying. Fail if the digest cannot be preserved. Consider using `--all` at the same time.

**--remove-signatures** Do not copy signatures, if any, from _source-image_. This is necessary when copying a signed image to a destination which does not support signatures.

-**--sign-by**=_key-id_ Add a signature using that key ID for an image name corresponding to _destination-image_.
+**--sign-by** _key-id_
+
+Add a “simple signing” signature using that key ID for an image name corresponding to _destination-image_

**--sign-by-sigstore-private-key** _path_

Add a sigstore signature using a private key at _path_ for an image name corresponding to _destination-image_

**--sign-passphrase-file** _path_

The passphrase to use when signing with `--sign-by` or `--sign-by-sigstore-private-key`. Only the first line will be read. A passphrase stored in a file is of questionable security if other users can read this file. Do not use this option if at all avoidable.

**--src-creds** _username[:password]_ for accessing the source registry.

@@ -72,15 +88,15 @@ Print usage statement.

**--src-cert-dir** _path_ Use certificates (*.crt, *.cert, *.key) at _path_ to connect to the source registry or daemon.

-**--src-no-creds** _bool-value_ Access the registry anonymously.
+**--src-no-creds** Access the registry anonymously.

-**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to a container source registry or daemon. Default to source registry entry in registry.conf setting.
+**--src-tls-verify**=_bool_ Require HTTPS and verify certificates when talking to a container source registry or daemon. Default to source registry entry in registry.conf setting.

**--dest-cert-dir** _path_ Use certificates (*.crt, *.cert, *.key) at _path_ to connect to the destination registry or daemon.

-**--dest-no-creds** _bool-value_ Access the registry anonymously.
+**--dest-no-creds** Access the registry anonymously.

-**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to a container destination registry or daemon. Default to destination registry entry in registry.conf setting.
+**--dest-tls-verify**=_bool_ Require HTTPS and verify certificates when talking to a container destination registry or daemon. Default to destination registry entry in registry.conf setting.

**--src-registry-token** _Bearer token_ for accessing the source registry.
@@ -102,13 +102,13 @@ Print the version number

| [skopeo-copy(1)](skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
| [skopeo-delete(1)](skopeo-delete.1.md) | Mark the _image-name_ for later deletion by the registry's garbage collector. |
| [skopeo-inspect(1)](skopeo-inspect.1.md) | Return low-level information about _image-name_ in a registry. |
-| [skopeo-list-tags(1)](skopeo-list-tags.1.md) | List tags in the transport-specific image repository. |
+| [skopeo-list-tags(1)](skopeo-list-tags.1.md) | List image names in a transport-specific collection of images. |
| [skopeo-login(1)](skopeo-login.1.md) | Login to a container registry. |
| [skopeo-logout(1)](skopeo-logout.1.md) | Logout of a container registry. |
| [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
| [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
| [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md) | Verify an image signature. |
-| [skopeo-sync(1)](skopeo-sync.1.md) | Synchronize images between container registries and local directories. |
+| [skopeo-sync(1)](skopeo-sync.1.md) | Synchronize images between registry repositories and local directories. |

## FILES
**/etc/containers/policy.json**
docs/skopeo.svg (598 lines)
@@ -1,546 +1,74 @@
[SVG markup omitted: the Inkscape-generated source of the skopeo logo is replaced with a new, much smaller skopeo-badge drawing; the change affects only gradient, path, and Inkscape metadata definitions.]
|
||||
</g>
|
||||
<path inkscape:connector-curvature="0" id="path81550" d="m 47.691007,322.31629 22.49734,12.98884" style="fill:#ffffff;fill-rule:evenodd;stroke:#ffffff;stroke-width:3.02523;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"/>
|
||||
<path style="fill:#ffffff;fill-rule:evenodd;stroke:#ffffff;stroke-width:3.02523;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" d="m 27.886021,312.45704 9.423431,5.07506" id="path81552" inkscape:connector-curvature="0"/>
|
||||
</g>
|
||||
<g transform="matrix(0.43729507,0,0,0.43729507,42.235192,101.28812)" id="g81568">
|
||||
<path style="fill:#2a72ac;fill-opacity:1;stroke:#003e6f;stroke-width:1.81514;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:6;stroke-opacity:1" d="m 34.507847,231.71327 26.65552,8.43269 21.69622,19.51455 -8.68507,12.39398 -46.04559,-26.61429 z" id="path81556" inkscape:connector-curvature="0" sodipodi:nodetypes="cccccc"/>
|
||||
<path sodipodi:nodetypes="ccccc" inkscape:connector-curvature="0" id="path81558" d="m 28.119527,245.45648 46.0456,26.61429 -3.50256,6.07342 -46.0456,-26.61429 z" style="fill:#808080;fill-opacity:1;stroke:#000000;stroke-width:1.81514;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:6"/>
|
||||
<path style="fill:#4d4d4d;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.81514;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" d="m 24.616967,251.5299 -11.1013,8.29627 c 0,0 6.16202,4.57403 15.2798,4.67656 9.1178,0.1025 11.46925,-3.93799 11.46925,-3.93799 z" id="path81560" inkscape:connector-curvature="0" sodipodi:nodetypes="ccccc"/>
|
||||
<ellipse ry="3.8438656" rx="3.8395541" style="fill:#e1ae4f;fill-opacity:1;stroke:#a1721b;stroke-width:1.81514;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:6;stroke-opacity:1" id="ellipse81562" cx="39.230743" cy="255.66997"/>
|
||||
<path sodipodi:nodetypes="ccc" style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#9dc6e7;stroke-width:1.81514;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" d="m 71.999346,266.02935 -8.9307,-5.38071 10.81942,-5.07707" id="path81564" inkscape:connector-curvature="0"/>
|
||||
<path style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#9dc6e7;stroke-width:1.81514;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" d="m 35.169799,245.57008 10.37702,-6.1817 -7.12581,-2.30459" id="path81566" inkscape:connector-curvature="0" sodipodi:nodetypes="ccc"/>
|
||||
</g>
|
||||
<g style="fill:none;fill-opacity:1;stroke:#9dc6e7;stroke-opacity:1" id="g81582" transform="translate(0.69195604,69.064926)">
|
||||
<path inkscape:export-ydpi="96.181694" inkscape:export-xdpi="96.181694" sodipodi:nodetypes="cc" style="fill:none;fill-opacity:1;stroke:#9dc6e7;stroke-width:0.79375;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" d="m 83.087609,145.72448 -3.6551,1.27991" id="path81570" inkscape:connector-curvature="0" inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"/>
|
||||
<path inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" sodipodi:nodetypes="cc" style="fill:none;fill-opacity:1;stroke:#9dc6e7;stroke-width:0.79375;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" d="m 51.138114,129.84674 1.971302,3.71206" id="path81572" inkscape:connector-curvature="0" inkscape:export-xdpi="96.181694" inkscape:export-ydpi="96.181694"/>
|
||||
<path inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" inkscape:connector-curvature="0" id="path81574" d="m 70.63337,129.84674 -2.345479,4.17978" style="fill:none;fill-opacity:1;stroke:#9dc6e7;stroke-width:0.79375;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" sodipodi:nodetypes="cc" inkscape:export-xdpi="96.181694" inkscape:export-ydpi="96.181694"/>
|
||||
<path inkscape:export-ydpi="96.181694" inkscape:export-xdpi="96.181694" sodipodi:nodetypes="cc" inkscape:connector-curvature="0" id="path81576" d="m 61.405599,166.31541 v 5.83669" style="fill:none;fill-opacity:1;stroke:#9dc6e7;stroke-width:0.79375;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"/>
|
||||
<path inkscape:export-ydpi="96.181694" inkscape:export-xdpi="96.181694" inkscape:connector-curvature="0" id="path81578" d="m 43.729779,164.25283 4.216366,-4.18995" style="fill:none;fill-opacity:1;stroke:#9dc6e7;stroke-width:0.79375;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" sodipodi:nodetypes="cc" inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"/>
|
||||
<path inkscape:export-ydpi="96.181694" inkscape:export-xdpi="96.181694" sodipodi:nodetypes="cc" style="fill:none;fill-opacity:1;stroke:#9dc6e7;stroke-width:0.79375;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" d="m 79.100039,164.25283 -1.50358,-1.57071" id="path81580" inkscape:connector-curvature="0" inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"/>
|
||||
</g>
|
||||
</g>
|
||||
<text id="text81524" y="-73.044861" x="-363.40085" style="font-style:normal;font-weight:normal;font-size:37.592px;line-height:22.5552px;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e1ae4f;fill-opacity:1;stroke:none;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" xml:space="preserve"><tspan style="font-style:normal;font-variant:normal;font-weight:500;font-stretch:normal;font-family:Montserrat;-inkscape-font-specification:'Montserrat Medium';fill:#e1ae4f;fill-opacity:1;stroke-width:0.264583px" y="-73.044861" x="-363.40085" id="tspan81522" sodipodi:role="line" dx="0 0 0 0 0 0"><tspan style="font-style:normal;font-variant:normal;font-weight:500;font-stretch:normal;font-family:Montserrat;-inkscape-font-specification:'Montserrat Medium';fill:#294172;fill-opacity:1" id="tspan81514">sk</tspan><tspan style="font-style:normal;font-variant:normal;font-weight:500;font-stretch:normal;font-family:Montserrat;-inkscape-font-specification:'Montserrat Medium';fill:#2a72ac;fill-opacity:1" id="tspan81516">o</tspan><tspan style="font-style:normal;font-variant:normal;font-weight:500;font-stretch:normal;font-family:Montserrat;-inkscape-font-specification:'Montserrat Medium';fill:#294172;fill-opacity:1" id="tspan81518">pe</tspan><tspan style="font-style:normal;font-variant:normal;font-weight:500;font-stretch:normal;font-family:Montserrat;-inkscape-font-specification:'Montserrat Medium';fill:#2a72ac;fill-opacity:1" id="tspan81520">o</tspan></tspan></text>
|
||||
</g>
|
||||
<path
|
||||
style="fill:#ffffff;stroke:#000000;stroke-width:1.32291663;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
d="m 246.61693,255.0795 -9.11198,15.78242 a 2.6351497,9.1643514 30 0 0 6.60453,-6.7032 2.6351497,9.1643514 30 0 0 2.50745,-9.07922 z"
|
||||
id="path84483"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
sodipodi:nodetypes="cccccc"
|
||||
inkscape:connector-curvature="0"
|
||||
id="path84481"
|
||||
d="m 202.36709,199.05917 26.65552,8.43269 21.69622,19.51455 -8.68507,12.39398 -46.04559,-26.61429 z"
|
||||
style="fill:#ffffff;stroke:#000000;stroke-width:1.32291663;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952" />
|
||||
<circle
|
||||
style="fill:#ffffff;stroke:#000000;stroke-width:1.32291663;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
id="path84224"
|
||||
cx="213.64427"
|
||||
cy="234.18927"
|
||||
r="35.482784" />
|
||||
<circle
|
||||
r="33.39888"
|
||||
cy="234.18927"
|
||||
cx="213.64427"
|
||||
id="circle84226"
|
||||
style="fill:url(#radialGradient84463);fill-opacity:1;stroke:none;stroke-width:0.52916664;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952" />
|
||||
<rect
|
||||
style="fill:#ffffff;stroke:#000000;stroke-width:0.79375005;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
id="rect84114"
|
||||
width="31.605196"
|
||||
height="19.16976"
|
||||
x="304.77545"
|
||||
y="97.128738"
|
||||
transform="rotate(30)" />
|
||||
<rect
|
||||
style="fill:#ffffff;stroke:#000000;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
id="rect84116"
|
||||
width="4.521956"
|
||||
height="21.377089"
|
||||
x="300.27435"
|
||||
y="96.025078"
|
||||
transform="rotate(30)" />
|
||||
<rect
|
||||
y="99.087395"
|
||||
x="283.71848"
|
||||
height="15.252436"
|
||||
width="16.459545"
|
||||
id="rect84118"
|
||||
style="fill:#ffffff;stroke:#000000;stroke-width:0.79375005;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
transform="rotate(30)" />
|
||||
<rect
|
||||
y="98.190086"
|
||||
x="280.00021"
|
||||
height="17.047071"
|
||||
width="3.617183"
|
||||
id="rect84120"
|
||||
style="fill:#ffffff;stroke:#000000;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
transform="rotate(30)" />
|
||||
<rect
|
||||
style="fill:#ffffff;stroke:#000000;stroke-width:0.79375005;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
id="rect84122"
|
||||
width="16.725054"
|
||||
height="9.8947001"
|
||||
x="263.36807"
|
||||
y="101.76627"
|
||||
transform="rotate(30)" />
|
||||
<rect
|
||||
style="fill:#ffffff;stroke:#000000;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
id="rect84124"
|
||||
width="4.8383565"
|
||||
height="11.503917"
|
||||
x="258.51526"
|
||||
y="100.96166"
|
||||
transform="rotate(30)" />
|
||||
<rect
|
||||
y="96.025078"
|
||||
x="336.51093"
|
||||
height="21.377089"
|
||||
width="4.521956"
|
||||
id="rect84126"
|
||||
style="fill:#ffffff;stroke:#000000;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
transform="rotate(30)" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84325);fill-opacity:1;stroke:none;stroke-width:0.79375005;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
d="m 207.24023,252.71811 25.53907,14.74414 8.52539,-14.76953 -25.53711,-14.74415 z"
|
||||
id="rect84313"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path84128"
|
||||
d="m 215.3335,241.36799 22.49734,12.98884"
|
||||
style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:0.52916664;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
|
||||
<path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path84130"
|
||||
d="m 246.61693,255.0795 -9.11198,15.78242 a 2.6351497,9.1643514 30 0 0 6.60453,-6.7032 2.6351497,9.1643514 30 0 0 2.50745,-9.07922 z"
|
||||
style="fill:#ffffff;stroke:#000000;stroke-width:0.79375005;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952" />
|
||||
<path
|
||||
style="fill:#ffffff;stroke:#000000;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:round;stroke-dashoffset:5.99999952"
|
||||
d="m 195.97877,212.80238 46.0456,26.61429 -3.50256,6.07342 -46.0456,-26.61429 z"
|
||||
id="path84134"
|
||||
inkscape:connector-curvature="0"
|
||||
sodipodi:nodetypes="ccccc" />
|
||||
<path
|
||||
style="fill:#ffffff;stroke:#000000;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:round;stroke-dashoffset:5.99999952"
|
||||
d="m 202.36709,199.05917 26.65552,8.43269 21.69622,19.51455 -8.68507,12.39398 -46.04559,-26.61429 z"
|
||||
id="path84136"
|
||||
inkscape:connector-curvature="0"
|
||||
sodipodi:nodetypes="cccccc" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84422);fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
d="m 186.31445,239.41146 1.30078,0.75 7.46485,-12.92968 -1.30078,-0.75 z"
|
||||
id="rect84410"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84349);fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:round;stroke-dashoffset:5.99999952"
|
||||
d="m 193.92188,218.48568 44.21289,25.55469 2.44335,-4.23242 -44.21289,-25.55664 z"
|
||||
id="path84284"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84363);fill-opacity:1;stroke:none;stroke-width:0.79375005;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
d="m 189.98438,240.4935 12.42187,7.16992 6.56641,-11.375 -12.42188,-7.16992 z"
|
||||
id="rect84351"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84377);fill-opacity:1;stroke:none;stroke-width:0.79375005;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
d="m 173.69727,227.99936 12.65234,7.30273 3.88867,-6.73633 -12.65234,-7.30273 z"
|
||||
id="rect84365"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
sodipodi:nodetypes="ccccc"
|
||||
inkscape:connector-curvature="0"
|
||||
id="path84138"
|
||||
d="m 192.47621,218.8758 -11.1013,8.29627 c 0,0 6.16202,4.57403 15.2798,4.67656 9.1178,0.1025 11.46925,-3.93799 11.46925,-3.93799 z"
|
||||
style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
|
||||
<ellipse
|
||||
cy="223.01579"
|
||||
cx="207.08998"
|
||||
id="circle84140"
|
||||
style="fill:#ffffff;stroke:#000000;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
rx="3.8395541"
|
||||
ry="3.8438656" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84333);fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:round;stroke-dashoffset:5.99999952"
|
||||
d="m 197.35938,212.35287 44.36523,25.64453 7.58984,-10.83203 -20.82617,-18.73242 -25.55078,-8.08399 z"
|
||||
id="path84272"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path84142"
|
||||
d="m 200.6837,212.37603 11.49279,-6.98413 -8.11935,-2.73742"
|
||||
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
|
||||
<path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path84144"
|
||||
d="m 241.31895,235.3047 -8.04514,-4.75769 10.057,-4.72299"
|
||||
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
sodipodi:nodetypes="ccc" />
|
||||
<path
|
||||
sodipodi:nodetypes="ccc"
|
||||
style="fill:none;fill-rule:evenodd;stroke:#2a72ac;stroke-width:0.52899998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
d="m 241.06868,235.79543 -8.9307,-5.38071 10.81942,-5.07707"
|
||||
id="path84280"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:none;fill-rule:evenodd;stroke:#2a72ac;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
d="m 200.60886,211.70589 10.37702,-6.1817 -7.12581,-2.30459"
|
||||
id="path84290"
|
||||
inkscape:connector-curvature="0"
|
||||
sodipodi:nodetypes="ccc" />
|
||||
<path
|
||||
style="fill:url(#radialGradient84471);fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
d="m 206.89258,220.23959 -0.29297,0.0352 -0.23633,0.0527 -0.26953,0.0898 -0.2793,0.125 -0.23437,0.13477 -0.20508,0.14648 -0.2207,0.19532 -0.18946,0.20117 -0.006,0.008 0.004,-0.008 -0.006,0.01 -0.008,0.01 -0.004,0.004 -0.006,0.006 -0.12109,0.1582 -0.002,0.004 -0.002,0.002 -0.16406,0.26758 -0.12109,0.24804 -0.0996,0.28125 -0.0645,0.24219 -0.0371,0.26367 -0.0176,0.31641 0.008,0.18164 0.0332,0.28711 0.0527,0.23437 0.004,0.0117 0.0937,0.28516 0.11133,0.24805 0.13086,0.23046 0.16992,0.23829 0.1836,0.20898 0.21093,0.19727 0.19532,0.14843 0.25586,0.15625 0.24218,0.11719 0.26172,0.0977 0.27344,0.0684 0.27344,0.043 0.29297,0.0137 0.18164,-0.008 0.29687,-0.0351 0.24024,-0.0547 0.27539,-0.0898 0.24218,-0.10938 0.25,-0.14453 0.23047,-0.16406 0.20899,-0.1836 0.20508,-0.21875 0.125,-0.16406 0.004,-0.006 0.1582,-0.25781 0.004,-0.008 0.12695,-0.26172 0.0996,-0.27344 0.002,-0.006 0.0586,-0.24023 0.0391,-0.26563 0.0176,-0.3125 -0.008,-0.17968 -0.0332,-0.28711 -0.0527,-0.23438 -0.004,-0.0117 -0.0937,-0.28515 -0.11132,-0.24805 -0.13086,-0.23047 -0.16993,-0.23828 -0.18554,-0.20899 -0.19922,-0.18945 -0.21875,-0.16406 -0.23828,-0.14844 -0.26563,-0.12695 -0.01,-0.004 -0.21875,-0.0801 -0.28516,-0.0723 -0.27344,-0.043 -0.29492,-0.0137 z"
|
||||
id="ellipse84292"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84425);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
d="m 183.23633,227.10092 c 5.59753,3.20336 12.36881,4.51528 18.71366,3.17108 1.59516,-0.38 3.17489,-0.99021 4.44874,-2.04739 -0.73893,-0.64617 -1.68301,-0.99544 -2.49844,-1.53493 -3.78032,-2.18293 -7.56064,-4.36587 -11.34096,-6.5488 -3.10767,2.32001 -6.21533,4.64003 -9.323,6.96004 z"
|
||||
id="path84298"
|
||||
inkscape:connector-curvature="0"
|
||||
sodipodi:nodetypes="cccccc" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84479);fill-opacity:1;stroke:none;stroke-width:0.79375005;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
d="m 238.62695,269.97787 0.006,-0.002 0.39453,-0.27735 0.41797,-0.34179 0.002,-0.002 0.45703,-0.42382 0.47851,-0.49219 0.0156,-0.0176 0.47656,-0.53711 0.002,-0.002 0.0117,-0.0137 0.48438,-0.5918 0.0117,-0.0156 0.49023,-0.64257 0.01,-0.0137 0.49609,-0.69726 0.48047,-0.71875 0.01,-0.0137 0.46485,-0.74805 0.004,-0.008 0.002,-0.002 0.30468,-0.51562 0.008,-0.0117 0.4375,-0.78711 0.40625,-0.77734 0.008,-0.0137 0.37109,-0.77149 0.008,-0.0156 0.33789,-0.75977 0.006,-0.0156 0.30078,-0.73829 0.27148,-0.74609 0.21289,-0.66602 0.17969,-0.66796 v -0.002 l 0.12305,-0.58203 0.002,-0.0137 0.0723,-0.51562 0.0176,-0.31836 z"
|
||||
id="path84379"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84408);fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
d="m 202.78906,251.42318 2.08399,1.20118 9.6289,-16.67969 -2.08203,-1.20117 z"
|
||||
id="rect84396"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84441);fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
d="m 169.0918,226.26889 2.35937,1.36133 4.69336,-8.13086 -2.35937,-1.36133 z"
|
||||
id="rect84429"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84455);fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
d="m 234.17188,269.53842 2.08203,1.20312 9.63086,-16.67773 -2.08399,-1.20313 z"
|
||||
id="rect84443"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:#ffffff;fill-rule:evenodd;stroke:#f8ead2;stroke-width:0.52916664;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
d="m 215.55025,240.82707 22.49734,12.98884"
|
||||
id="path84521"
|
||||
inkscape:connector-curvature="0" />
|
||||
</g>
|
||||
</svg>
|
||||
|
||||
|
Before Width: | Height: | Size: 24 KiB After Width: | Height: | Size: 14 KiB |
go.mod (102 changed lines)
@@ -1,24 +1,100 @@
 module github.com/containers/skopeo

-go 1.12
+go 1.17

 require (
-	github.com/containers/common v0.46.1-0.20211026130826-7abfd453c86f
-	github.com/containers/image/v5 v5.17.0
-	github.com/containers/ocicrypt v1.1.2
-	github.com/containers/storage v1.37.0
-	github.com/docker/docker v20.10.11+incompatible
-	github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
+	github.com/containers/common v0.50.1
+	github.com/containers/image/v5 v5.23.0
+	github.com/containers/ocicrypt v1.1.5
+	github.com/containers/storage v1.43.0
 	github.com/opencontainers/go-digest v1.0.0
-	github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283
+	github.com/opencontainers/image-spec v1.1.0-rc1
 	github.com/opencontainers/image-tools v1.0.0-rc3
-	github.com/pkg/errors v0.9.1
-	github.com/russross/blackfriday v2.0.0+incompatible // indirect
-	github.com/sirupsen/logrus v1.8.1
-	github.com/spf13/cobra v1.2.1
+	github.com/sirupsen/logrus v1.9.0
+	github.com/spf13/cobra v1.5.0
 	github.com/spf13/pflag v1.0.5
-	github.com/stretchr/testify v1.7.0
+	github.com/stretchr/testify v1.8.0
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
+	golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
 	gopkg.in/yaml.v2 v2.4.0
 )
+
+require (
+	github.com/BurntSushi/toml v1.2.0 // indirect
+	github.com/Microsoft/go-winio v0.5.2 // indirect
+	github.com/Microsoft/hcsshim v0.9.4 // indirect
+	github.com/VividCortex/ewma v1.2.0 // indirect
+	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
+	github.com/containerd/cgroups v1.0.4 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.12.0 // indirect
+	github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
+	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/docker/distribution v2.8.1+incompatible // indirect
+	github.com/docker/docker v20.10.18+incompatible // indirect
+	github.com/docker/docker-credential-helpers v0.7.0 // indirect
+	github.com/docker/go-connections v0.4.0 // indirect
+	github.com/docker/go-units v0.5.0 // indirect
+	github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
+	github.com/ghodss/yaml v1.0.0 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/google/go-containerregistry v0.11.0 // indirect
+	github.com/google/go-intervals v0.0.2 // indirect
+	github.com/google/uuid v1.3.0 // indirect
+	github.com/gorilla/mux v1.8.0 // indirect
+	github.com/hashicorp/errwrap v1.1.0 // indirect
+	github.com/hashicorp/go-multierror v1.1.1 // indirect
+	github.com/honeycombio/libhoney-go v1.15.8 // indirect
+	github.com/imdario/mergo v0.3.13 // indirect
+	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/compress v1.15.11 // indirect
+	github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect
+	github.com/kr/pretty v0.2.1 // indirect
+	github.com/kr/text v0.2.0 // indirect
+	github.com/letsencrypt/boulder v0.0.0-20220723181115-27de4befb95e // indirect
+	github.com/mattn/go-runewidth v0.0.13 // indirect
+	github.com/mattn/go-shellwords v1.0.12 // indirect
+	github.com/miekg/pkcs11 v1.1.1 // indirect
+	github.com/mistifyio/go-zfs/v3 v3.0.0 // indirect
+	github.com/moby/sys/mountinfo v0.6.2 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/opencontainers/runc v1.1.4 // indirect
+	github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
+	github.com/opencontainers/selinux v1.10.2 // indirect
+	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/proglottis/gpgme v0.1.3 // indirect
+	github.com/rivo/uniseg v0.2.0 // indirect
+	github.com/russross/blackfriday v2.0.0+incompatible // indirect
+	github.com/sigstore/sigstore v1.4.2 // indirect
+	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
+	github.com/sylabs/sif/v2 v2.8.0 // indirect
+	github.com/tchap/go-patricia v2.3.0+incompatible // indirect
+	github.com/theupdateframework/go-tuf v0.5.1 // indirect
+	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
+	github.com/ulikunitz/xz v0.5.10 // indirect
+	github.com/vbatts/tar-split v0.11.2 // indirect
+	github.com/vbauerster/mpb/v7 v7.5.3 // indirect
+	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
+	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
+	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
+	go.etcd.io/bbolt v1.3.6 // indirect
+	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
+	go.opencensus.io v0.23.0 // indirect
+	golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0 // indirect
+	golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect
+	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
+	golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 // indirect
+	golang.org/x/text v0.3.7 // indirect
+	google.golang.org/genproto v0.0.0-20220720214146-176da50484ac // indirect
+	google.golang.org/grpc v1.48.0 // indirect
+	google.golang.org/protobuf v1.28.1 // indirect
+	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)
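The jump from `go 1.12` to `go 1.17` is what splits the dependencies into two `require` blocks: with a 1.17 `go` directive, the Go toolchain records indirect dependencies separately and prunes the module graph. A sketch of how such a layout is regenerated (standard Go tooling, not part of this diff):

```sh
# Bump the go directive and let the toolchain rewrite go.mod;
# indirect dependencies move into their own require block.
go mod tidy -go=1.17
```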
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 cc -E - > /dev/null 2> /dev/null << EOF
 #include <btrfs/ioctl.h>
 EOF
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 cc -E - > /dev/null 2> /dev/null << EOF
 #include <btrfs/version.h>
 EOF
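Both probe scripts use the same trick: pipe a single `#include` into the C preprocessor and let the exit status reveal whether the btrfs development headers are installed. A standalone sketch of the pattern (the emitted tag name here is illustrative, not taken from this diff):

```sh
#!/usr/bin/env bash
# cc -E exits 0 only if the header can be resolved.
if cc -E - > /dev/null 2> /dev/null << EOF
#include <btrfs/ioctl.h>
EOF
then
    echo ''                 # header present: no build restriction needed
else
    echo 'exclude_btrfs'    # illustrative build tag emitted when the header is missing
fi
```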
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 tmpdir="$PWD/tmp.$RANDOM"
 mkdir -p "$tmpdir"
 trap 'rm -fr "$tmpdir"' EXIT
@@ -34,7 +34,7 @@ if [[ "$SKOPEO_CONTAINER_TESTS" == "0" ]] && [[ "$CI" != "true" ]]; then
     echo " the Makefile targets WITHOUT the '-local' suffix."
     echo "***************************************************************"
     ) > /dev/stderr
-    sleep 5s
+    sleep 5
 fi

 echo
@@ -1,14 +1,21 @@
 #!/bin/bash
 set -e

-# Before running podman for the first time, make sure
-# to set storage to vfs (not overlay): podman-in-podman
-# doesn't work with overlay. And, disable mountopt,
-# which causes error with vfs.
-sed -i \
-    -e 's/^driver\s*=.*/driver = "vfs"/' \
-    -e 's/^mountopt/#mountopt/' \
-    /etc/containers/storage.conf
+# These tests can run in/outside of a container. However,
+# not all storage drivers are supported in a container
+# environment. Detect this and setup storage when
+# running in a container.
+if ((SKOPEO_CONTAINER_TESTS)) && [[ -r /etc/containers/storage.conf ]]; then
+    sed -i \
+        -e 's/^driver\s*=.*/driver = "vfs"/' \
+        -e 's/^mountopt/#mountopt/' \
+        /etc/containers/storage.conf
+elif ((SKOPEO_CONTAINER_TESTS)); then
+    cat >> /etc/containers/storage.conf << EOF
+[storage]
+driver = "vfs"
+EOF
+fi

 # Build skopeo, install into /usr/bin
 make PREFIX=/usr install
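Either branch leaves containers-storage configured for vfs inside the container. An illustrative minimal end state of the file (only the `[storage]` stanza is actually written by the script; anything else is an assumption):

```toml
# /etc/containers/storage.conf after the setup script runs in a container:
[storage]
driver = "vfs"
# any pre-existing mountopt line is commented out, since vfs rejects it
#mountopt = "..."
```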
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 set -e

 STATUS=$(git status --porcelain)
install.md (49 changed lines)
@@ -1,7 +1,8 @@
 # Installing Skopeo

 ## Distribution Packages
-`skopeo` may already be packaged in your distribution.
+`skopeo` may already be packaged in your distribution. This document lists the
+installation steps for many distros, along with their information and support links.

 ### Fedora

@@ -9,30 +10,51 @@
 sudo dnf -y install skopeo
 ```

-### RHEL/CentOS ≥ 8 and CentOS Stream
+[Package Info](https://src.fedoraproject.org/rpms/skopeo) and
+[Bugzilla](https://bugzilla.redhat.com/buglist.cgi?bug_status=__open__&classification=Fedora&component=skopeo&product=Fedora)
+
+Fedora bugs can be reported on the Skopeo GitHub [Issues](https://github.com/containers/skopeo/issues) page.
+
+### RHEL / CentOS Stream ≥ 8

 ```sh
 sudo dnf -y install skopeo
 ```

+If you are a RHEL customer, please reach out through the official RHEL support
+channels for any issues.
+
+CentOS Stream 9: [Package Info](https://gitlab.com/redhat/centos-stream/rpms/skopeo/-/tree/c9s) and
+[Bugzilla](https://bugzilla.redhat.com/buglist.cgi?bug_status=__open__&classification=Red%20Hat&component=skopeo&product=Red%20Hat%20Enterprise%20Linux%209&version=CentOS%20Stream)
+
+CentOS Stream 8: [Package Info](https://git.centos.org/rpms/skopeo/tree/c8s-stream-rhel8) and
+[Bugzilla](https://bugzilla.redhat.com/buglist.cgi?bug_status=__open__&classification=Red%20Hat&component=skopeo&product=Red%20Hat%20Enterprise%20Linux%208&version=CentOS%20Stream)
+
+
 ### RHEL/CentOS ≤ 7.x

 ```sh
 sudo yum -y install skopeo
 ```

+CentOS 7: [Package Repo](https://git.centos.org/rpms/skopeo/tree/c7-extras)
+
 ### openSUSE

 ```sh
 sudo zypper install skopeo
 ```

+[Package Info](https://software.opensuse.org/package/skopeo)
+
 ### Alpine

 ```sh
 sudo apk add skopeo
 ```

+[Package Info](https://pkgs.alpinelinux.org/packages?name=skopeo)
+
 ### macOS

 ```sh
@@ -44,6 +66,8 @@ brew install skopeo
 $ nix-env -i skopeo
 ```

+[Package Info](https://search.nixos.org/packages?&show=skopeo&query=skopeo)
+
 ### Debian

 The skopeo package is available on [Bullseye](https://packages.debian.org/bullseye/skopeo),
@@ -55,6 +79,8 @@ sudo apt-get update
 sudo apt-get -y install skopeo
 ```

+[Package Info](https://packages.debian.org/stable/skopeo)
+
 ### Raspberry Pi OS arm64 (beta)

 Raspberry Pi OS uses the standard Debian's repositories,
@@ -73,17 +99,7 @@ sudo apt-get -y update
 sudo apt-get -y install skopeo
 ```

-The [Kubic project](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/skopeo)
-provides packages for Ubuntu 20.04 (it should also work with direct derivatives like Pop!\_OS).
-
-```bash
-. /etc/os-release
-echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
-curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/Release.key | sudo apt-key add -
-sudo apt-get update
-sudo apt-get -y upgrade
-sudo apt-get -y install skopeo
-```
+[Package Info](https://packages.ubuntu.com/jammy/skopeo)

 ### Windows
 Skopeo has not yet been packaged for Windows. There is an [open feature
@@ -196,6 +212,13 @@ Building in a container is simpler, but more restrictive:
 $ make binary
 ```

+### Shell completion scripts
+
+Skopeo has shell completion scripts for bash, zsh, fish and powershell. They are installed as part of `make install`.
+You may have to restart your shell in order for them to take effect.
+
+For instructions to manually generate and load the scripts please see `skopeo completion --help`.
+
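As an aside, manually generating and loading a completion script typically looks like this — a sketch using cobra's standard `completion` subcommand and a conventional bash-completion path; consult `skopeo completion --help` for the authoritative steps:

```sh
# Generate the bash completion script and load it for the current session.
skopeo completion bash > ~/.local/share/bash-completion/completions/skopeo
source ~/.local/share/bash-completion/completions/skopeo
```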
 ### Installation

 Finally, after the binary and documentation is built:
@@ -36,12 +36,12 @@ func (s *SkopeoSuite) SetUpSuite(c *check.C) {

 func (s *SkopeoSuite) TearDownSuite(c *check.C) {
 	if s.regV2 != nil {
-		s.regV2.Close()
+		s.regV2.tearDown(c)
 	}
 	if s.regV2WithAuth != nil {
 		//cmd := exec.Command("docker", "logout", s.regV2WithAuth)
 		//c.Assert(cmd.Run(), check.IsNil)
-		s.regV2WithAuth.Close()
+		s.regV2WithAuth.tearDown(c)
 	}
 }

@@ -6,7 +6,7 @@ import (
 	"crypto/x509"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io/fs"
 	"log"
 	"net/http"
 	"net/http/httptest"
@@ -64,9 +64,7 @@ func (s *CopySuite) SetUpSuite(c *check.C) {
 	s.registry = setupRegistryV2At(c, v2DockerRegistryURL, false, false)
 	s.s1Registry = setupRegistryV2At(c, v2s1DockerRegistryURL, false, true)

-	gpgHome, err := ioutil.TempDir("", "skopeo-gpg")
-	c.Assert(err, check.IsNil)
-	s.gpgHome = gpgHome
+	s.gpgHome = c.MkDir()
 	os.Setenv("GNUPGHOME", s.gpgHome)

 	for _, key := range []string{"personal", "official"} {
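The conversion repeated throughout the rest of this file follows one pattern: gocheck's `C.MkDir()` hands back a per-test temporary directory whose cleanup the framework owns, replacing the three-line `ioutil.TempDir`/`Assert`/`RemoveAll` dance and sidestepping the `io/ioutil` deprecation from Go 1.16. Schematically, using this file's own helpers (the test name is illustrative):

```go
// Before: manual temp-dir lifecycle, three lines per directory.
//	dir, err := ioutil.TempDir("", "copy-manifest-list")
//	c.Assert(err, check.IsNil)
//	defer os.RemoveAll(dir)
//
// After: the checker creates the directory and removes it when the test ends.
func (s *CopySuite) TestExample(c *check.C) {
	dir := c.MkDir()
	assertSkopeoSucceeds(c, "", "copy", knownListImage, "dir:"+dir)
}
```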
@@ -75,21 +73,18 @@ func (s *CopySuite) SetUpSuite(c *check.C) {
 		runCommandWithInput(c, batchInput, gpgBinary, "--batch", "--gen-key")

 		out := combinedOutputOfCommand(c, gpgBinary, "--armor", "--export", fmt.Sprintf("%s@example.com", key))
-		err := ioutil.WriteFile(filepath.Join(s.gpgHome, fmt.Sprintf("%s-pubkey.gpg", key)),
+		err := os.WriteFile(filepath.Join(s.gpgHome, fmt.Sprintf("%s-pubkey.gpg", key)),
 			[]byte(out), 0600)
 		c.Assert(err, check.IsNil)
 	}
 }

 func (s *CopySuite) TearDownSuite(c *check.C) {
-	if s.gpgHome != "" {
-		os.RemoveAll(s.gpgHome)
-	}
 	if s.registry != nil {
-		s.registry.Close()
+		s.registry.tearDown(c)
 	}
 	if s.s1Registry != nil {
-		s.s1Registry.Close()
+		s.s1Registry.tearDown(c)
 	}
 	if s.cluster != nil {
 		s.cluster.tearDown(c)
@@ -97,104 +92,81 @@ func (s *CopySuite) TearDownSuite(c *check.C) {
 }

 func (s *CopySuite) TestCopyWithManifestList(c *check.C) {
-	dir, err := ioutil.TempDir("", "copy-manifest-list")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir)
+	dir := c.MkDir()
 	assertSkopeoSucceeds(c, "", "copy", knownListImage, "dir:"+dir)
 }

 func (s *CopySuite) TestCopyAllWithManifestList(c *check.C) {
-	dir, err := ioutil.TempDir("", "copy-all-manifest-list")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir)
+	dir := c.MkDir()
 	assertSkopeoSucceeds(c, "", "copy", "--all", knownListImage, "dir:"+dir)
 }

 func (s *CopySuite) TestCopyAllWithManifestListRoundTrip(c *check.C) {
-	oci1, err := ioutil.TempDir("", "copy-all-manifest-list-oci")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(oci1)
-	oci2, err := ioutil.TempDir("", "copy-all-manifest-list-oci")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(oci2)
-	dir1, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir1)
-	dir2, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir2)
-	assertSkopeoSucceeds(c, "", "copy", "--all", knownListImage, "oci:"+oci1)
-	assertSkopeoSucceeds(c, "", "copy", "--all", "oci:"+oci1, "dir:"+dir1)
-	assertSkopeoSucceeds(c, "", "copy", "--all", "dir:"+dir1, "oci:"+oci2)
-	assertSkopeoSucceeds(c, "", "copy", "--all", "oci:"+oci2, "dir:"+dir2)
+	oci1 := c.MkDir()
+	oci2 := c.MkDir()
+	dir1 := c.MkDir()
+	dir2 := c.MkDir()
+	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", knownListImage, "oci:"+oci1)
+	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "oci:"+oci1, "dir:"+dir1)
+	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "dir:"+dir1, "oci:"+oci2)
+	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "oci:"+oci2, "dir:"+dir2)
 	assertDirImagesAreEqual(c, dir1, dir2)
 	out := combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2)
 	c.Assert(out, check.Equals, "")
 }

 func (s *CopySuite) TestCopyAllWithManifestListConverge(c *check.C) {
-	oci1, err := ioutil.TempDir("", "copy-all-manifest-list-oci")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(oci1)
-	oci2, err := ioutil.TempDir("", "copy-all-manifest-list-oci")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(oci2)
-	dir1, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir1)
-	dir2, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir2)
-	assertSkopeoSucceeds(c, "", "copy", "--all", knownListImage, "oci:"+oci1)
-	assertSkopeoSucceeds(c, "", "copy", "--all", "oci:"+oci1, "dir:"+dir1)
-	assertSkopeoSucceeds(c, "", "copy", "--all", "--format", "oci", knownListImage, "dir:"+dir2)
-	assertSkopeoSucceeds(c, "", "copy", "--all", "dir:"+dir2, "oci:"+oci2)
+	oci1 := c.MkDir()
+	oci2 := c.MkDir()
+	dir1 := c.MkDir()
+	dir2 := c.MkDir()
+	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", knownListImage, "oci:"+oci1)
+	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "oci:"+oci1, "dir:"+dir1)
+	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "--format", "oci", knownListImage, "dir:"+dir2)
+	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "dir:"+dir2, "oci:"+oci2)
 	assertDirImagesAreEqual(c, dir1, dir2)
 	out := combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2)
 	c.Assert(out, check.Equals, "")
 }

+func (s *CopySuite) TestCopyNoneWithManifestList(c *check.C) {
+	dir1 := c.MkDir()
+	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=index-only", knownListImage, "dir:"+dir1)
+
+	manifestPath := filepath.Join(dir1, "manifest.json")
+	readManifest, err := os.ReadFile(manifestPath)
+	c.Assert(err, check.IsNil)
+	mimeType := manifest.GuessMIMEType(readManifest)
+	c.Assert(mimeType, check.Equals, "application/vnd.docker.distribution.manifest.list.v2+json")
+	out := combinedOutputOfCommand(c, "ls", "-1", dir1)
+	c.Assert(out, check.Equals, "manifest.json\nversion\n")
+}
+
 func (s *CopySuite) TestCopyWithManifestListConverge(c *check.C) {
-	oci1, err := ioutil.TempDir("", "copy-all-manifest-list-oci")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(oci1)
-	oci2, err := ioutil.TempDir("", "copy-all-manifest-list-oci")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(oci2)
-	dir1, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir1)
-	dir2, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir2)
+	oci1 := c.MkDir()
+	oci2 := c.MkDir()
+	dir1 := c.MkDir()
+	dir2 := c.MkDir()
 	assertSkopeoSucceeds(c, "", "copy", knownListImage, "oci:"+oci1)
-	assertSkopeoSucceeds(c, "", "copy", "--all", "oci:"+oci1, "dir:"+dir1)
+	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "oci:"+oci1, "dir:"+dir1)
 	assertSkopeoSucceeds(c, "", "copy", "--format", "oci", knownListImage, "dir:"+dir2)
-	assertSkopeoSucceeds(c, "", "copy", "--all", "dir:"+dir2, "oci:"+oci2)
+	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "dir:"+dir2, "oci:"+oci2)
 	assertDirImagesAreEqual(c, dir1, dir2)
 	out := combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2)
 	c.Assert(out, check.Equals, "")
 }

 func (s *CopySuite) TestCopyAllWithManifestListStorageFails(c *check.C) {
-	storage, err := ioutil.TempDir("", "copy-storage")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(storage)
+	storage := c.MkDir()
 	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
-	assertSkopeoFails(c, `.*destination transport .* does not support copying multiple images as a group.*`, "copy", "--all", knownListImage, "containers-storage:"+storage+"test")
+	assertSkopeoFails(c, `.*destination transport .* does not support copying multiple images as a group.*`, "copy", "--multi-arch=all", knownListImage, "containers-storage:"+storage+"test")
 }

 func (s *CopySuite) TestCopyWithManifestListStorage(c *check.C) {
-	storage, err := ioutil.TempDir("", "copy-manifest-list-storage")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(storage)
+	storage := c.MkDir()
 	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
-	dir1, err := ioutil.TempDir("", "copy-manifest-list-storage-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir1)
-	dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir2)
+	dir1 := c.MkDir()
+	dir2 := c.MkDir()
 	assertSkopeoSucceeds(c, "", "copy", knownListImage, "containers-storage:"+storage+"test")
 	assertSkopeoSucceeds(c, "", "copy", knownListImage, "dir:"+dir1)
 	assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test", "dir:"+dir2)
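These hunks also track a CLI change: the boolean `--all` flag was joined by the richer `--multi-arch` option (note that TestCopyAllWithManifestList above still exercises plain `--all`). Typical invocations, with an illustrative image; `system` is the default value per skopeo's documentation:

```sh
# Copy every instance of a manifest list (what --all used to do):
skopeo copy --multi-arch=all docker://docker.io/library/busybox:latest oci:busybox-all

# Copy only the instance matching the current platform:
skopeo copy --multi-arch=system docker://docker.io/library/busybox:latest oci:busybox-one

# Copy just the manifest list/index itself, no per-arch images:
skopeo copy --multi-arch=index-only docker://docker.io/library/busybox:latest dir:busybox-index
```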
@@ -203,16 +175,10 @@ func (s *CopySuite) TestCopyWithManifestListStorage(c *check.C) {
 }

 func (s *CopySuite) TestCopyWithManifestListStorageMultiple(c *check.C) {
-	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-multiple")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(storage)
+	storage := c.MkDir()
 	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
-	dir1, err := ioutil.TempDir("", "copy-manifest-list-storage-multiple-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir1)
-	dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-multiple-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir2)
+	dir1 := c.MkDir()
+	dir2 := c.MkDir()
 	assertSkopeoSucceeds(c, "", "--override-arch", "amd64", "copy", knownListImage, "containers-storage:"+storage+"test")
 	assertSkopeoSucceeds(c, "", "--override-arch", "arm64", "copy", knownListImage, "containers-storage:"+storage+"test")
 	assertSkopeoSucceeds(c, "", "--override-arch", "arm64", "copy", knownListImage, "dir:"+dir1)
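Note the global flags in these assertions: `--override-arch` (like `--override-os` used further down) precedes the subcommand and changes which manifest-list entry skopeo selects. For example:

```sh
# Pull the arm64 entry of a manifest list even on an amd64 host:
skopeo --override-arch arm64 copy docker://docker.io/library/busybox:latest dir:/tmp/busybox-arm64
```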
@@ -222,24 +188,16 @@ func (s *CopySuite) TestCopyWithManifestListStorageMultiple(c *check.C) {
 }

 func (s *CopySuite) TestCopyWithManifestListDigest(c *check.C) {
-	dir1, err := ioutil.TempDir("", "copy-manifest-list-digest-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir1)
-	dir2, err := ioutil.TempDir("", "copy-manifest-list-digest-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir2)
-	oci1, err := ioutil.TempDir("", "copy-manifest-list-digest-oci")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(oci1)
-	oci2, err := ioutil.TempDir("", "copy-manifest-list-digest-oci")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(oci2)
+	dir1 := c.MkDir()
+	dir2 := c.MkDir()
+	oci1 := c.MkDir()
+	oci2 := c.MkDir()
 	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
 	manifestDigest, err := manifest.Digest([]byte(m))
 	c.Assert(err, check.IsNil)
 	digest := manifestDigest.String()
 	assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "dir:"+dir1)
-	assertSkopeoSucceeds(c, "", "copy", "--all", knownListImage+"@"+digest, "dir:"+dir2)
+	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", knownListImage+"@"+digest, "dir:"+dir2)
 	assertSkopeoSucceeds(c, "", "copy", "dir:"+dir1, "oci:"+oci1)
 	assertSkopeoSucceeds(c, "", "copy", "dir:"+dir2, "oci:"+oci2)
 	out := combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2)
@@ -247,31 +205,21 @@ func (s *CopySuite) TestCopyWithManifestListDigest(c *check.C) {
 }

 func (s *CopySuite) TestCopyWithDigestfileOutput(c *check.C) {
-	tempdir, err := ioutil.TempDir("", "tempdir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tempdir)
-	dir1, err := ioutil.TempDir("", "copy-manifest-list-digest-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir1)
+	tempdir := c.MkDir()
+	dir1 := c.MkDir()
 	digestOutPath := filepath.Join(tempdir, "digest.txt")
 	assertSkopeoSucceeds(c, "", "copy", "--digestfile="+digestOutPath, knownListImage, "dir:"+dir1)
-	readDigest, err := ioutil.ReadFile(digestOutPath)
+	readDigest, err := os.ReadFile(digestOutPath)
 	c.Assert(err, check.IsNil)
 	_, err = digest.Parse(string(readDigest))
 	c.Assert(err, check.IsNil)
 }

 func (s *CopySuite) TestCopyWithManifestListStorageDigest(c *check.C) {
-	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(storage)
+	storage := c.MkDir()
 	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
-	dir1, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir1)
-	dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir2)
+	dir1 := c.MkDir()
+	dir2 := c.MkDir()
 	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
 	manifestDigest, err := manifest.Digest([]byte(m))
 	c.Assert(err, check.IsNil)
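`--digestfile` (exercised in TestCopyWithDigestfileOutput above) writes the digest of the copied image's manifest to a file, which scripts can consume afterwards. A sketch with illustrative paths:

```sh
# Copy and capture the manifest digest for later pinning:
skopeo copy --digestfile=/tmp/busybox.digest \
    docker://docker.io/library/busybox:latest dir:/tmp/busybox
cat /tmp/busybox.digest   # prints something like sha256:<64 hex digits>
```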
@@ -284,16 +232,10 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigest(c *check.C) {
 }

 func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArches(c *check.C) {
-	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(storage)
+	storage := c.MkDir()
 	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
-	dir1, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir1)
-	dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir2)
+	dir1 := c.MkDir()
+	dir2 := c.MkDir()
 	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
 	manifestDigest, err := manifest.Digest([]byte(m))
 	c.Assert(err, check.IsNil)
@@ -306,9 +248,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArches(c *check
 }

 func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesBothUseListDigest(c *check.C) {
-	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-multiple-arches-both")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(storage)
+	storage := c.MkDir()
 	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
 	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
 	manifestDigest, err := manifest.Digest([]byte(m))
@@ -328,9 +268,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesBothUseLi
 }

 func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesFirstUsesListDigest(c *check.C) {
-	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-multiple-arches-first")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(storage)
+	storage := c.MkDir()
 	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
 	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
 	manifestDigest, err := manifest.Digest([]byte(m))
@@ -364,9 +302,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesFirstUses
 }

 func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesSecondUsesListDigest(c *check.C) {
-	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-multiple-arches-second")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(storage)
+	storage := c.MkDir()
 	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
 	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
 	manifestDigest, err := manifest.Digest([]byte(m))
@@ -400,9 +336,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesSecondUse
 }

 func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesThirdUsesListDigest(c *check.C) {
-	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-multiple-arches-third")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(storage)
+	storage := c.MkDir()
 	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
 	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
 	manifestDigest, err := manifest.Digest([]byte(m))
@@ -436,9 +370,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesThirdUses
 }

 func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesTagAndDigest(c *check.C) {
-	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-multiple-arches-tag-digest")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(storage)
+	storage := c.MkDir()
 	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
 	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
 	manifestDigest, err := manifest.Digest([]byte(m))
@@ -481,28 +413,20 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesTagAndDig
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopyFailsWhenImageOSDoesNotMatchRuntimeOS(c *check.C) {
|
||||
storage, err := ioutil.TempDir("", "copy-fails-image-does-not-match-runtime")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(storage)
|
||||
storage := c.MkDir()
|
||||
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
|
||||
assertSkopeoFails(c, `.*no image found in manifest list for architecture .*, variant .*, OS .*`, "copy", knownWindowsOnlyImage, "containers-storage:"+storage+"test")
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopySucceedsWhenImageDoesNotMatchRuntimeButWeOverride(c *check.C) {
- storage, err := ioutil.TempDir("", "copy-succeeds-image-does-not-match-runtime-but-override")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(storage)
+ storage := c.MkDir()
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
assertSkopeoSucceeds(c, "", "--override-os=windows", "--override-arch=amd64", "copy", knownWindowsOnlyImage, "containers-storage:"+storage+"test")
}

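The recurring change in this series — gocheck's C.MkDir() replacing the ioutil.TempDir / c.Assert / defer os.RemoveAll triple — is worth one standalone illustration. A minimal sketch, assuming the suite's existing helpers (assertSkopeoSucceeds, knownListImage) and a hypothetical test name; C.MkDir comes from gopkg.in/check.v1 and removes the directory automatically when the suite finishes:

func (s *CopySuite) TestExampleMkDir(c *check.C) {
	// Old pattern: three lines, and the directory leaks if an assertion
	// fires before the defer is registered:
	//   dir, err := ioutil.TempDir("", "copy-example")
	//   c.Assert(err, check.IsNil)
	//   defer os.RemoveAll(dir)
	dir := c.MkDir() // one line, no error handling, automatic cleanup

	assertSkopeoSucceeds(c, "", "copy", knownListImage, "dir:"+dir)
}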
func (s *CopySuite) TestCopySimpleAtomicRegistry(c *check.C) {
- dir1, err := ioutil.TempDir("", "copy-1")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(dir1)
- dir2, err := ioutil.TempDir("", "copy-2")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(dir2)
+ dir1 := c.MkDir()
+ dir2 := c.MkDir()

// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// "pull": docker: → dir:
@@ -518,12 +442,8 @@ func (s *CopySuite) TestCopySimpleAtomicRegistry(c *check.C) {
func (s *CopySuite) TestCopySimple(c *check.C) {
const ourRegistry = "docker://" + v2DockerRegistryURL + "/"

- dir1, err := ioutil.TempDir("", "copy-1")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(dir1)
- dir2, err := ioutil.TempDir("", "copy-2")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(dir2)
+ dir1 := c.MkDir()
+ dir2 := c.MkDir()

// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// "pull": docker: → dir:
@@ -542,7 +462,7 @@ func (s *CopySuite) TestCopySimple(c *check.C) {
ociImgName := "pause"
defer os.RemoveAll(ociDest)
assertSkopeoSucceeds(c, "", "copy", "docker://k8s.gcr.io/pause:latest", "oci:"+ociDest+":"+ociImgName)
- _, err = os.Stat(ociDest)
+ _, err := os.Stat(ociDest)
c.Assert(err, check.IsNil)

// docker v2s2 -> OCI image layout without image name
@@ -554,31 +474,14 @@
}

func (s *CopySuite) TestCopyEncryption(c *check.C) {

- originalImageDir, err := ioutil.TempDir("", "copy-1")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(originalImageDir)
- encryptedImgDir, err := ioutil.TempDir("", "copy-2")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(encryptedImgDir)
- decryptedImgDir, err := ioutil.TempDir("", "copy-3")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(decryptedImgDir)
- keysDir, err := ioutil.TempDir("", "copy-4")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(keysDir)
- undecryptedImgDir, err := ioutil.TempDir("", "copy-5")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(undecryptedImgDir)
- multiLayerImageDir, err := ioutil.TempDir("", "copy-6")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(multiLayerImageDir)
- partiallyEncryptedImgDir, err := ioutil.TempDir("", "copy-7")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(partiallyEncryptedImgDir)
- partiallyDecryptedImgDir, err := ioutil.TempDir("", "copy-8")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(partiallyDecryptedImgDir)
+ originalImageDir := c.MkDir()
+ encryptedImgDir := c.MkDir()
+ decryptedImgDir := c.MkDir()
+ keysDir := c.MkDir()
+ undecryptedImgDir := c.MkDir()
+ multiLayerImageDir := c.MkDir()
+ partiallyEncryptedImgDir := c.MkDir()
+ partiallyDecryptedImgDir := c.MkDir()

// Create RSA key pair
privateKey, err := rsa.GenerateKey(rand.Reader, 4096)
@@ -587,9 +490,9 @@ func (s *CopySuite) TestCopyEncryption(c *check.C) {
privateKeyBytes := x509.MarshalPKCS1PrivateKey(privateKey)
publicKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey)
c.Assert(err, check.IsNil)
- err = ioutil.WriteFile(keysDir+"/private.key", privateKeyBytes, 0644)
+ err = os.WriteFile(keysDir+"/private.key", privateKeyBytes, 0644)
c.Assert(err, check.IsNil)
- err = ioutil.WriteFile(keysDir+"/public.key", publicKeyBytes, 0644)
+ err = os.WriteFile(keysDir+"/public.key", publicKeyBytes, 0644)
c.Assert(err, check.IsNil)

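The ioutil.WriteFile calls above become os.WriteFile with an identical signature (Go 1.16 moved the helpers into os). A self-contained sketch of the same key-generation pattern, with hypothetical file names — not the test code itself:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"log"
	"os"
	"path/filepath"
)

func writeKeyPair(dir string) error {
	priv, err := rsa.GenerateKey(rand.Reader, 4096)
	if err != nil {
		return err
	}
	privBytes := x509.MarshalPKCS1PrivateKey(priv) // PKCS#1 DER, as in the test
	pubBytes, err := x509.MarshalPKIXPublicKey(&priv.PublicKey) // PKIX DER
	if err != nil {
		return err
	}
	if err := os.WriteFile(filepath.Join(dir, "private.key"), privBytes, 0644); err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(dir, "public.key"), pubBytes, 0644)
}

func main() {
	if err := writeKeyPair(os.TempDir()); err != nil {
		log.Fatal(err)
	}
}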
// We can either perform encryption or decryption on the image.
@@ -613,7 +516,7 @@ func (s *CopySuite) TestCopyEncryption(c *check.C) {
invalidPrivateKey, err := rsa.GenerateKey(rand.Reader, 4096)
c.Assert(err, check.IsNil)
invalidPrivateKeyBytes := x509.MarshalPKCS1PrivateKey(invalidPrivateKey)
- err = ioutil.WriteFile(keysDir+"/invalid_private.key", invalidPrivateKeyBytes, 0644)
+ err = os.WriteFile(keysDir+"/invalid_private.key", invalidPrivateKeyBytes, 0644)
c.Assert(err, check.IsNil)
assertSkopeoFails(c, ".*no suitable key unwrapper found or none of the private keys could be used for decryption.*",
"copy", "--decryption-key", keysDir+"/invalid_private.key",
@@ -653,7 +556,7 @@ func (s *CopySuite) TestCopyEncryption(c *check.C) {
}

func matchLayerBlobBinaryType(c *check.C, ociImageDirPath string, contentType string, matchCount int) {
- files, err := ioutil.ReadDir(ociImageDirPath)
+ files, err := os.ReadDir(ociImageDirPath)
c.Assert(err, check.IsNil)

foundCount := 0
@@ -689,7 +592,7 @@ func assertDirImagesAreEqual(c *check.C, dir1, dir2 string) {
digests := []digest.Digest{}
for _, dir := range []string{dir1, dir2} {
manifestPath := filepath.Join(dir, "manifest.json")
- m, err := ioutil.ReadFile(manifestPath)
+ m, err := os.ReadFile(manifestPath)
c.Assert(err, check.IsNil)
digest, err := manifest.Digest(m)
c.Assert(err, check.IsNil)
@@ -707,7 +610,7 @@ func assertSchema1DirImagesAreEqualExceptNames(c *check.C, dir1, ref1, dir2, ref
manifests := []map[string]interface{}{}
for dir, ref := range map[string]string{dir1: ref1, dir2: ref2} {
manifestPath := filepath.Join(dir, "manifest.json")
- m, err := ioutil.ReadFile(manifestPath)
+ m, err := os.ReadFile(manifestPath)
c.Assert(err, check.IsNil)
data := map[string]interface{}{}
err = json.Unmarshal(m, &data)
@@ -730,12 +633,8 @@ func assertSchema1DirImagesAreEqualExceptNames(c *check.C, dir1, ref1, dir2, ref

// Streaming (skopeo copy)
func (s *CopySuite) TestCopyStreaming(c *check.C) {
- dir1, err := ioutil.TempDir("", "streaming-1")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(dir1)
- dir2, err := ioutil.TempDir("", "streaming-2")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(dir2)
+ dir1 := c.MkDir()
+ dir2 := c.MkDir()

// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// streaming: docker: → atomic:
@@ -755,12 +654,8 @@ func (s *CopySuite) TestCopyStreaming(c *check.C) {
func (s *CopySuite) TestCopyOCIRoundTrip(c *check.C) {
const ourRegistry = "docker://" + v2DockerRegistryURL + "/"

- oci1, err := ioutil.TempDir("", "oci-1")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(oci1)
- oci2, err := ioutil.TempDir("", "oci-2")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(oci2)
+ oci1 := c.MkDir()
+ oci2 := c.MkDir()

// Docker -> OCI
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", testFQIN, "oci:"+oci1+":latest")
@@ -783,7 +678,7 @@ func (s *CopySuite) TestCopyOCIRoundTrip(c *check.C) {
// Verify using the upstream OCI image validator, this should catch most
// non-compliance errors. DO NOT REMOVE THIS TEST UNLESS IT'S ABSOLUTELY
// NECESSARY.
- err = image.ValidateLayout(oci1, nil, logger)
+ err := image.ValidateLayout(oci1, nil, logger)
c.Assert(err, check.IsNil)
err = image.ValidateLayout(oci2, nil, logger)
c.Assert(err, check.IsNil)
@@ -805,9 +700,7 @@ func (s *CopySuite) TestCopySignatures(c *check.C) {
c.Skip(fmt.Sprintf("Signing not supported: %v", err))
}

- dir, err := ioutil.TempDir("", "signatures-dest")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(dir)
+ dir := c.MkDir()
dirDest := "dir:" + dir

policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome})
@@ -861,9 +754,7 @@ func (s *CopySuite) TestCopyDirSignatures(c *check.C) {
c.Skip(fmt.Sprintf("Signing not supported: %v", err))
}

- topDir, err := ioutil.TempDir("", "dir-signatures-top")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(topDir)
+ topDir := c.MkDir()
topDirDest := "dir:" + topDir

for _, suffix := range []string{"/dir1", "/dir2", "/restricted/personal", "/restricted/official", "/restricted/badidentity", "/dest"} {
@@ -906,9 +797,7 @@ func (s *CopySuite) TestCopyDirSignatures(c *check.C) {
func (s *CopySuite) TestCopyCompression(c *check.C) {
const uncompresssedLayerFile = "160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710"

- topDir, err := ioutil.TempDir("", "compression-top")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(topDir)
+ topDir := c.MkDir()

for i, t := range []struct{ fixture, remote string }{
{"uncompressed-image-s1", "docker://" + v2DockerRegistryURL + "/compression/compression:s1"},
@@ -943,21 +832,21 @@ func (s *CopySuite) TestCopyCompression(c *check.C) {

func findRegularFiles(c *check.C, root string) []string {
result := []string{}
- err := filepath.Walk(root, filepath.WalkFunc(func(path string, info os.FileInfo, err error) error {
+ err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
- if info.Mode().IsRegular() {
+ if d.Type().IsRegular() {
result = append(result, path)
}
return nil
- }))
+ })
c.Assert(err, check.IsNil)
return result
}

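filepath.WalkDir (Go 1.16) hands the callback a cheap fs.DirEntry instead of calling os.Lstat on every visited file, which is why the rewrite above drops the filepath.Walk/os.FileInfo form. A standalone sketch of the new pattern, with hypothetical names:

package main

import (
	"fmt"
	"io/fs"
	"log"
	"path/filepath"
)

func regularFiles(root string) ([]string, error) {
	var result []string
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		// d.Type() returns the FileMode type bits without an extra stat call.
		if d.Type().IsRegular() {
			result = append(result, path)
		}
		return nil
	})
	return result, err
}

func main() {
	files, err := regularFiles(".")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(files), "regular files")
}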
- // --sign-by and policy use for docker: with sigstore
- func (s *CopySuite) TestCopyDockerSigstore(c *check.C) {
+ // --sign-by and policy use for docker: with lookaside
+ func (s *CopySuite) TestCopyDockerLookaside(c *check.C) {
mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
c.Assert(err, check.IsNil)
defer mech.Close()
@@ -967,21 +856,19 @@ func (s *CopySuite) TestCopyDockerSigstore(c *check.C) {

const ourRegistry = "docker://" + v2DockerRegistryURL + "/"

- tmpDir, err := ioutil.TempDir("", "signatures-sigstore")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(tmpDir)
+ tmpDir := c.MkDir()
copyDest := filepath.Join(tmpDir, "dest")
err = os.Mkdir(copyDest, 0755)
c.Assert(err, check.IsNil)
dirDest := "dir:" + copyDest
- plainSigstore := filepath.Join(tmpDir, "sigstore")
- splitSigstoreStaging := filepath.Join(tmpDir, "sigstore-staging")
+ plainLookaside := filepath.Join(tmpDir, "lookaside")
+ splitLookasideStaging := filepath.Join(tmpDir, "lookaside-staging")

- splitSigstoreReadServerHandler := http.NotFoundHandler()
- splitSigstoreReadServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- splitSigstoreReadServerHandler.ServeHTTP(w, r)
+ splitLookasideReadServerHandler := http.NotFoundHandler()
+ splitLookasideReadServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ splitLookasideReadServerHandler.ServeHTTP(w, r)
}))
- defer splitSigstoreReadServer.Close()
+ defer splitLookasideReadServer.Close()

policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome})
defer os.Remove(policy)
@@ -989,20 +876,20 @@ func (s *CopySuite) TestCopyDockerSigstore(c *check.C) {
err = os.Mkdir(registriesDir, 0755)
c.Assert(err, check.IsNil)
registriesFile := fileFromFixture(c, "fixtures/registries.yaml",
- map[string]string{"@sigstore@": plainSigstore, "@split-staging@": splitSigstoreStaging, "@split-read@": splitSigstoreReadServer.URL})
+ map[string]string{"@lookaside@": plainLookaside, "@split-staging@": splitLookasideStaging, "@split-read@": splitLookasideReadServer.URL})
err = os.Symlink(registriesFile, filepath.Join(registriesDir, "registries.yaml"))
c.Assert(err, check.IsNil)

- // Get an image to work with. Also verifies that we can use Docker repositories with no sigstore configured.
+ // Get an image to work with. Also verifies that we can use Docker repositories with no lookaside configured.
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", testFQIN, ourRegistry+"original/busybox")
// Pulling an unsigned image fails.
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
"--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"original/busybox", dirDest)

- // Signing with sigstore defined succeeds,
+ // Signing with lookaside defined succeeds,
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "--sign-by", "personal@example.com", ourRegistry+"original/busybox", ourRegistry+"signed/busybox")
// a signature file has been created,
- foundFiles := findRegularFiles(c, plainSigstore)
+ foundFiles := findRegularFiles(c, plainLookaside)
c.Assert(foundFiles, check.HasLen, 1)
// and pulling a signed image succeeds.
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"signed/busybox", dirDest)
@@ -1010,19 +897,19 @@ func (s *CopySuite) TestCopyDockerSigstore(c *check.C) {
// Deleting the image succeeds,
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "delete", ourRegistry+"signed/busybox")
// and the signature file has been deleted (but we leave the directories around).
- foundFiles = findRegularFiles(c, plainSigstore)
+ foundFiles = findRegularFiles(c, plainLookaside)
c.Assert(foundFiles, check.HasLen, 0)

- // Signing with a read/write sigstore split succeeds,
+ // Signing with a read/write lookaside split succeeds,
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "--sign-by", "personal@example.com", ourRegistry+"original/busybox", ourRegistry+"public/busybox")
// and a signature file has been created.
- foundFiles = findRegularFiles(c, splitSigstoreStaging)
+ foundFiles = findRegularFiles(c, splitLookasideStaging)
c.Assert(foundFiles, check.HasLen, 1)
- // Pulling the image fails because the read sigstore URL has not been populated:
+ // Pulling the image fails because the read lookaside URL has not been populated:
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
"--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"public/busybox", dirDest)
- // Pulling the image succeeds after the read sigstore URL is available:
- splitSigstoreReadServerHandler = http.FileServer(http.Dir(splitSigstoreStaging))
+ // Pulling the image succeeds after the read lookaside URL is available:
+ splitLookasideReadServerHandler = http.FileServer(http.Dir(splitLookasideStaging))
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"public/busybox", dirDest)
}

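The split read/write lookaside above stages signatures into a local directory and serves them read-only over plain HTTP. A minimal, self-contained sketch of that serving side, with hypothetical names — not the skopeo test code itself:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
)

func main() {
	staging, _ := os.MkdirTemp("", "lookaside-staging")
	defer os.RemoveAll(staging)
	_ = os.WriteFile(filepath.Join(staging, "signature-1"), []byte("sig"), 0644)

	// Serve the staged signatures read-only, as the test's read server does.
	srv := httptest.NewServer(http.FileServer(http.Dir(staging)))
	defer srv.Close()

	resp, err := http.Get(srv.URL + "/signature-1")
	if err == nil {
		fmt.Println("lookaside read status:", resp.Status)
		resp.Body.Close()
	}
}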
@@ -1035,9 +922,7 @@ func (s *CopySuite) TestCopyAtomicExtension(c *check.C) {
c.Skip(fmt.Sprintf("Signing not supported: %v", err))
}

- topDir, err := ioutil.TempDir("", "atomic-extension")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(topDir)
+ topDir := c.MkDir()
for _, subdir := range []string{"dirAA", "dirAD", "dirDA", "dirDD", "registries.d"} {
err := os.MkdirAll(filepath.Join(topDir, subdir), 0755)
c.Assert(err, check.IsNil)
@@ -1084,22 +969,6 @@ func (s *CopySuite) TestCopyAtomicExtension(c *check.C) {
assertDirImagesAreEqual(c, filepath.Join(topDir, "dirDA"), filepath.Join(topDir, "dirDD"))
}

- // copyWithSignedIdentity creates a copy of an unsigned image, adding a signature for an unrelated identity
- // This should be easier than using standalone-sign.
- func copyWithSignedIdentity(c *check.C, src, dest, signedIdentity, signBy, registriesDir string) {
- topDir, err := ioutil.TempDir("", "copyWithSignedIdentity")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(topDir)
-
- signingDir := filepath.Join(topDir, "signing-temp")
- assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", src, "dir:"+signingDir)
- c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
- assertSkopeoSucceeds(c, "^$", "standalone-sign", "-o", filepath.Join(signingDir, "signature-1"),
- filepath.Join(signingDir, "manifest.json"), signedIdentity, signBy)
- c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
- assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--dest-tls-verify=false", "dir:"+signingDir, dest)
- }

// Both mirroring support in registries.conf, and mirrored remapIdentity support in policy.json
func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
const regPrefix = "docker://localhost:5006/myns/mirroring-"
@@ -1111,16 +980,14 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
c.Skip(fmt.Sprintf("Signing not supported: %v", err))
}

- topDir, err := ioutil.TempDir("", "mirrored-signatures")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(topDir)
- registriesDir := filepath.Join(topDir, "registries.d") // An empty directory to disable sigstore use
+ topDir := c.MkDir()
+ registriesDir := filepath.Join(topDir, "registries.d") // An empty directory to disable lookaside use
dirDest := "dir:" + filepath.Join(topDir, "unused-dest")

policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome})
defer os.Remove(policy)

- // We use X-R-S-S for this testing to avoid having to deal with the sigstores.
+ // We use X-R-S-S for this testing to avoid having to deal with the lookasides.
// A downside is that OpenShift records signatures per image, so the error messages below
// list all signatures for other tags used for the same image as well.
// So, make sure to never create a signature that could be considered valid in a different part of the test (i.e. don't reuse tags).
@@ -1145,10 +1012,12 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
assertSkopeoFails(c, ".*Source image rejected: None of the signatures were accepted, reasons: Signature for identity localhost:5006/myns/mirroring-primary:direct is not accepted; Signature for identity localhost:5006/myns/mirroring-mirror:mirror-signed is not accepted.*",
"--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:mirror-signed", dirDest)

+ // Fail if we specify an unqualified identity
+ assertSkopeoFails(c, ".*Could not parse --sign-identity: repository name must be canonical.*",
+ "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by=personal@example.com", "--sign-identity=this-is-not-fully-specified", regPrefix+"primary:unsigned", regPrefix+"mirror:primary-signed")

// Create a signature for mirroring-primary:primary-signed without pushing there.
- copyWithSignedIdentity(c, regPrefix+"primary:unsigned", regPrefix+"mirror:primary-signed",
- "localhost:5006/myns/mirroring-primary:primary-signed", "personal@example.com",
- registriesDir)
+ assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by=personal@example.com", "--sign-identity=localhost:5006/myns/mirroring-primary:primary-signed", regPrefix+"primary:unsigned", regPrefix+"mirror:primary-signed")
// Verify that a correctly signed image for the primary is accessible using the primary's reference
assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:primary-signed", dirDest)
// … but verify that while it is accessible using the mirror location
@@ -1163,9 +1032,7 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
// … it is NOT accessible when requiring a signature …
assertSkopeoFails(c, ".*Source image rejected: None of the signatures were accepted, reasons: Signature for identity localhost:5006/myns/mirroring-primary:direct is not accepted; Signature for identity localhost:5006/myns/mirroring-mirror:mirror-signed is not accepted; Signature for identity localhost:5006/myns/mirroring-primary:primary-signed is not accepted.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest)
// … until signed.
- copyWithSignedIdentity(c, regPrefix+"remap:remapped", regPrefix+"remap:remapped",
- "localhost:5006/myns/mirroring-primary:remapped", "personal@example.com",
- registriesDir)
+ assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by=personal@example.com", "--sign-identity=localhost:5006/myns/mirroring-primary:remapped", regPrefix+"remap:remapped", regPrefix+"remap:remapped")
assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest)
// To be extra clear about the semantics, verify that the signedPrefix (primary) location never exists
// and only the remapped prefix (mirror) is accessed.
@@ -1174,9 +1041,7 @@

func (s *SkopeoSuite) TestCopySrcWithAuth(c *check.C) {
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", testFQIN, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
- dir1, err := ioutil.TempDir("", "copy-1")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(dir1)
+ dir1 := c.MkDir()
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--src-creds=testuser:testpassword", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "dir:"+dir1)
}

@@ -1190,12 +1055,12 @@ func (s *SkopeoSuite) TestCopySrcAndDestWithAuth(c *check.C) {
}

func (s *CopySuite) TestCopyNoPanicOnHTTPResponseWithoutTLSVerifyFalse(c *check.C) {
+ topDir := c.MkDir()

const ourRegistry = "docker://" + v2DockerRegistryURL + "/"

// dir:test isn't created beforehand just because we already know this could
// just fail when evaluating the src
assertSkopeoFails(c, ".*server gave HTTP response to HTTPS client.*",
- "copy", ourRegistry+"foobar", "dir:test")
+ "copy", ourRegistry+"foobar", "dir:"+topDir)
}

func (s *CopySuite) TestCopySchemaConversion(c *check.C) {
@@ -1206,9 +1071,7 @@ func (s *CopySuite) TestCopySchemaConversion(c *check.C) {
}

func (s *CopySuite) TestCopyManifestConversion(c *check.C) {
- topDir, err := ioutil.TempDir("", "manifest-conversion")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(topDir)
+ topDir := c.MkDir()
srcDir := filepath.Join(topDir, "source")
destDir1 := filepath.Join(topDir, "dest1")
destDir2 := filepath.Join(topDir, "dest2")
@@ -1231,10 +1094,15 @@ func (s *CopySuite) TestCopyManifestConversion(c *check.C) {
verifyManifestMIMEType(c, destDir2, manifest.DockerV2Schema2MediaType)
}

+ func (s *CopySuite) TestCopyPreserveDigests(c *check.C) {
+ topDir := c.MkDir()
+
+ assertSkopeoSucceeds(c, "", "copy", knownListImage, "--multi-arch=all", "--preserve-digests", "dir:"+topDir)
+ assertSkopeoFails(c, ".*Instructed to preserve digests.*", "copy", knownListImage, "--multi-arch=all", "--preserve-digests", "--format=oci", "dir:"+topDir)
+ }

func (s *CopySuite) testCopySchemaConversionRegistries(c *check.C, schema1Registry, schema2Registry string) {
- topDir, err := ioutil.TempDir("", "schema-conversion")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(topDir)
+ topDir := c.MkDir()
for _, subdir := range []string{"input1", "input2", "dest2"} {
err := os.MkdirAll(filepath.Join(topDir, subdir), 0755)
c.Assert(err, check.IsNil)
@@ -1268,35 +1136,35 @@ func (s *CopySuite) testCopySchemaConversionRegistries(c *check.C, schema1Regist
const regConfFixture = "./fixtures/registries.conf"

func (s *SkopeoSuite) TestSuccessCopySrcWithMirror(c *check.C) {
- dir, err := ioutil.TempDir("", "copy-mirror")
- c.Assert(err, check.IsNil)
+ dir := c.MkDir()

assertSkopeoSucceeds(c, "", "--registries-conf="+regConfFixture, "copy",
"docker://mirror.invalid/busybox", "dir:"+dir)
}

func (s *SkopeoSuite) TestFailureCopySrcWithMirrorsUnavailable(c *check.C) {
- dir, err := ioutil.TempDir("", "copy-mirror")
- c.Assert(err, check.IsNil)
+ dir := c.MkDir()

- assertSkopeoFails(c, ".*no such host.*", "--registries-conf="+regConfFixture, "copy",
- "docker://invalid.invalid/busybox", "dir:"+dir)
+ // .invalid domains are, per RFC 6761, supposed to result in NXDOMAIN.
+ // With systemd-resolved (used only via NSS?), we instead seem to get “Temporary failure in name resolution”
+ assertSkopeoFails(c, ".*(no such host|Temporary failure in name resolution).*",
+ "--registries-conf="+regConfFixture, "copy", "docker://invalid.invalid/busybox", "dir:"+dir)
}

func (s *SkopeoSuite) TestSuccessCopySrcWithMirrorAndPrefix(c *check.C) {
- dir, err := ioutil.TempDir("", "copy-mirror")
- c.Assert(err, check.IsNil)
+ dir := c.MkDir()

assertSkopeoSucceeds(c, "", "--registries-conf="+regConfFixture, "copy",
"docker://gcr.invalid/foo/bar/busybox", "dir:"+dir)
}

func (s *SkopeoSuite) TestFailureCopySrcWithMirrorAndPrefixUnavailable(c *check.C) {
- dir, err := ioutil.TempDir("", "copy-mirror")
- c.Assert(err, check.IsNil)
+ dir := c.MkDir()

- assertSkopeoFails(c, ".*no such host.*", "--registries-conf="+regConfFixture, "copy",
- "docker://gcr.invalid/wrong/prefix/busybox", "dir:"+dir)
+ // .invalid domains are, per RFC 6761, supposed to result in NXDOMAIN.
+ // With systemd-resolved (used only via NSS?), we instead seem to get “Temporary failure in name resolution”
+ assertSkopeoFails(c, ".*(no such host|Temporary failure in name resolution).*",
+ "--registries-conf="+regConfFixture, "copy", "docker://gcr.invalid/wrong/prefix/busybox", "dir:"+dir)
}

func (s *CopySuite) TestCopyFailsWhenReferenceIsInvalid(c *check.C) {

@@ -1,6 +1,6 @@
docker:
localhost:5555:
- sigstore: file://@sigstore@
+ lookaside: file://@lookaside@
localhost:5555/public:
- sigstore-staging: file://@split-staging@
- sigstore: @split-read@
+ lookaside-staging: file://@split-staging@
+ lookaside: @split-read@

@@ -5,14 +5,13 @@ import (
"context"
"encoding/base64"
"fmt"
- "io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"time"

- "github.com/docker/docker/pkg/homedir"
+ "github.com/containers/storage/pkg/homedir"
"gopkg.in/check.v1"
)

@@ -33,10 +32,7 @@ type openshiftCluster struct {
// in isolated test environment.
func startOpenshiftCluster(c *check.C) *openshiftCluster {
cluster := &openshiftCluster{}

- dir, err := ioutil.TempDir("", "openshift-cluster")
- c.Assert(err, check.IsNil)
- cluster.workingDir = dir
+ cluster.workingDir = c.MkDir()

cluster.startMaster(c)
cluster.prepareRegistryConfig(c)
@@ -196,7 +192,7 @@ func (cluster *openshiftCluster) startRegistry(c *check.C) {
// The default configuration currently already contains acceptschema2: false
})
// Make sure the configuration contains "acceptschema2: false", because eventually it will be enabled upstream and this function will need to be updated.
- configContents, err := ioutil.ReadFile(schema1Config)
+ configContents, err := os.ReadFile(schema1Config)
c.Assert(err, check.IsNil)
c.Assert(string(configContents), check.Matches, "(?s).*acceptschema2: false.*")
cluster.processes = append(cluster.processes, cluster.startRegistryProcess(c, 5005, schema1Config))
@@ -240,7 +236,7 @@ func (cluster *openshiftCluster) dockerLogin(c *check.C) {
}`, port, authValue))
}
configJSON := `{"auths": {` + strings.Join(auths, ",") + `}}`
- err = ioutil.WriteFile(filepath.Join(cluster.dockerDir, "config.json"), []byte(configJSON), 0600)
+ err = os.WriteFile(filepath.Join(cluster.dockerDir, "config.json"), []byte(configJSON), 0600)
c.Assert(err, check.IsNil)
}

@@ -258,12 +254,12 @@ func (cluster *openshiftCluster) relaxImageSignerPermissions(c *check.C) {
// tearDown stops the cluster services and deletes (only some!) of the state.
func (cluster *openshiftCluster) tearDown(c *check.C) {
for i := len(cluster.processes) - 1; i >= 0; i-- {
- cluster.processes[i].Process.Kill()
- }
- if cluster.workingDir != "" {
- os.RemoveAll(cluster.workingDir)
+ // It’s undocumented what Kill() returns if the process has terminated,
+ // so we couldn’t check just for that. This is running in a container anyway…
+ _ = cluster.processes[i].Process.Kill()
}
if cluster.dockerDir != "" {
- os.RemoveAll(cluster.dockerDir)
+ err := os.RemoveAll(cluster.dockerDir)
+ c.Assert(err, check.IsNil)
}
}

@@ -15,11 +15,15 @@ TestRunShell is not really a test; it is a convenient way to use the registry se
in openshift.go and CopySuite to get an interactive environment for experimentation.

To use it, run:

sudo make shell

to start a container, then within the container:

SKOPEO_CONTAINER_TESTS=1 PS1='nested> ' go test -tags openshift_shell -timeout=24h ./integration -v -check.v -check.vv -check.f='CopySuite.TestRunShell'

An example of what can be done within the container:

cd ..; make bin/skopeo PREFIX=/usr install
./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://quay.io/libpod/busybox:latest atomic:localhost:5000/myns/personal:personal
oc get istag personal:personal -o json

@@ -3,7 +3,7 @@ package main
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net"
"os"
"os/exec"
@@ -28,7 +28,7 @@ const expectedProxySemverMajor = "0.2"
type request struct {
// Method is the name of the function
Method string `json:"method"`
- // Args is the arguments (parsed inside the fuction)
+ // Args is the arguments (parsed inside the function)
Args []interface{} `json:"args"`
}

@@ -57,7 +57,7 @@ type pipefd struct {
fd *os.File
}

- func (self *proxy) call(method string, args []interface{}) (rval interface{}, fd *pipefd, err error) {
+ func (p *proxy) call(method string, args []interface{}) (rval interface{}, fd *pipefd, err error) {
req := request{
Method: method,
Args: args,
@@ -66,7 +66,7 @@ func (self *proxy) call(method string, args []interface{}) (rval interface{}, fd
if err != nil {
return
}
- n, err := self.c.Write(reqbuf)
+ n, err := p.c.Write(reqbuf)
if err != nil {
return
}
@@ -76,7 +76,7 @@ func (self *proxy) call(method string, args []interface{}) (rval interface{}, fd
}
oob := make([]byte, syscall.CmsgSpace(1))
replybuf := make([]byte, maxMsgSize)
- n, oobn, _, _, err := self.c.ReadMsgUnix(replybuf, oob)
+ n, oobn, _, _, err := p.c.ReadMsgUnix(replybuf, oob)
if err != nil {
err = fmt.Errorf("reading reply: %v", err)
return
@@ -119,9 +119,9 @@ func (self *proxy) call(method string, args []interface{}) (rval interface{}, fd
return
}

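call reads a JSON reply plus an optional pipe file descriptor passed as SCM_RIGHTS ancillary data on the Unix socket. A hedged, library-style sketch of how such an fd can be recovered from the oob buffer (not the exact proxy code; error handling trimmed):

package proxyclient

import (
	"fmt"
	"net"
	"os"
	"syscall"
)

// readReplyWithFd reads one reply message and, if ancillary data is
// attached, turns the first passed fd into an *os.File.
func readReplyWithFd(conn *net.UnixConn) ([]byte, *os.File, error) {
	buf := make([]byte, 32*1024)
	oob := make([]byte, syscall.CmsgSpace(4)) // room for one 32-bit fd
	n, oobn, _, _, err := conn.ReadMsgUnix(buf, oob)
	if err != nil {
		return nil, nil, fmt.Errorf("reading reply: %w", err)
	}
	var f *os.File
	if oobn > 0 {
		scms, err := syscall.ParseSocketControlMessage(oob[:oobn])
		if err != nil {
			return nil, nil, err
		}
		fds, err := syscall.ParseUnixRights(&scms[0])
		if err != nil {
			return nil, nil, err
		}
		f = os.NewFile(uintptr(fds[0]), "pipe")
	}
	return buf[:n], f, nil
}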
- func (self *proxy) callNoFd(method string, args []interface{}) (rval interface{}, err error) {
+ func (p *proxy) callNoFd(method string, args []interface{}) (rval interface{}, err error) {
var fd *pipefd
- rval, fd, err = self.call(method, args)
+ rval, fd, err = p.call(method, args)
if err != nil {
return
}
@@ -132,9 +132,9 @@ func (self *proxy) callNoFd(method string, args []interface{}) (rval interface{}
return rval, nil
}

- func (self *proxy) callReadAllBytes(method string, args []interface{}) (rval interface{}, buf []byte, err error) {
+ func (p *proxy) callReadAllBytes(method string, args []interface{}) (rval interface{}, buf []byte, err error) {
var fd *pipefd
- rval, fd, err = self.call(method, args)
+ rval, fd, err = p.call(method, args)
if err != nil {
return
}
@@ -144,13 +144,13 @@ func (self *proxy) callReadAllBytes(method string, args []interface{}) (rval int
}
fetchchan := make(chan byteFetch)
go func() {
- manifestBytes, err := ioutil.ReadAll(fd.fd)
+ manifestBytes, err := io.ReadAll(fd.fd)
fetchchan <- byteFetch{
content: manifestBytes,
err: err,
}
}()
- _, err = self.callNoFd("FinishPipe", []interface{}{fd.id})
+ _, err = p.callNoFd("FinishPipe", []interface{}{fd.id})
if err != nil {
return
}
@@ -241,7 +241,7 @@ func runTestGetManifestAndConfig(p *proxy, img string) error {
}
imgid := uint32(imgidv)

- v, manifestBytes, err := p.callReadAllBytes("GetManifest", []interface{}{imgid})
+ _, manifestBytes, err := p.callReadAllBytes("GetManifest", []interface{}{imgid})
if err != nil {
return err
}
@@ -250,22 +250,44 @@ func runTestGetManifestAndConfig(p *proxy, img string) error {
return err
}

- v, configBytes, err := p.callReadAllBytes("GetConfig", []interface{}{imgid})
+ _, configBytes, err := p.callReadAllBytes("GetFullConfig", []interface{}{imgid})
if err != nil {
return err
}
- var config imgspecv1.ImageConfig
+ var config imgspecv1.Image
err = json.Unmarshal(configBytes, &config)
if err != nil {
return err
}

+ // Validate that the image config seems sane
+ if config.Architecture == "" {
+ return fmt.Errorf("No architecture found")
+ }
+ if len(config.Config.Cmd) == 0 && len(config.Config.Entrypoint) == 0 {
+ return fmt.Errorf("No CMD or ENTRYPOINT set")
+ }

+ // Also test this legacy interface
+ _, ctrconfigBytes, err := p.callReadAllBytes("GetConfig", []interface{}{imgid})
+ if err != nil {
+ return err
+ }
+ var ctrconfig imgspecv1.ImageConfig
+ err = json.Unmarshal(ctrconfigBytes, &ctrconfig)
+ if err != nil {
+ return err
+ }

// Validate that the config seems sane
- if len(config.Cmd) == 0 && len(config.Entrypoint) == 0 {
+ if len(ctrconfig.Cmd) == 0 && len(ctrconfig.Entrypoint) == 0 {
return fmt.Errorf("No CMD or ENTRYPOINT set")
}

_, err = p.callNoFd("CloseImage", []interface{}{imgid})
if err != nil {
return err
}

return nil
}

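The new GetFullConfig call returns the complete OCI image configuration, which unmarshals into imgspecv1.Image, while the legacy GetConfig returns only the container-runtime section (imgspecv1.ImageConfig). A sketch of the same sanity checks applied to a raw config blob, assuming only the OCI image-spec module:

package main

import (
	"encoding/json"
	"fmt"

	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func validateConfig(configBytes []byte) error {
	var config imgspecv1.Image
	if err := json.Unmarshal(configBytes, &config); err != nil {
		return err
	}
	// Full config: platform fields live at the top level …
	if config.Architecture == "" {
		return fmt.Errorf("no architecture found")
	}
	// … and the runtime section is nested under Config.
	if len(config.Config.Cmd) == 0 && len(config.Config.Entrypoint) == 0 {
		return fmt.Errorf("no CMD or ENTRYPOINT set")
	}
	return nil
}

func main() {
	err := validateConfig([]byte(`{"architecture":"amd64","os":"linux","config":{"Cmd":["sh"]}}`))
	fmt.Println("valid:", err == nil)
}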
@@ -2,7 +2,6 @@ package main

import (
"fmt"
- "io/ioutil"
"net/http"
"os"
"os/exec"
@@ -20,7 +19,6 @@ const (
type testRegistryV2 struct {
cmd *exec.Cmd
url string
- dir string
username string
password string
email string
@@ -45,10 +43,7 @@ func setupRegistryV2At(c *check.C, url string, auth, schema1 bool) *testRegistry
}

func newTestRegistryV2At(c *check.C, url string, auth, schema1 bool) (*testRegistryV2, error) {
- tmp, err := ioutil.TempDir("", "registry-test-")
- if err != nil {
- return nil, err
- }
+ tmp := c.MkDir()
template := `version: 0.1
loglevel: debug
storage:
@@ -58,6 +53,9 @@ storage:
enabled: true
http:
addr: %s
+ compatibility:
+ schema1:
+ enabled: true
%s`
var (
htpasswd string
@@ -71,7 +69,7 @@ http:
username = "testuser"
password = "testpassword"
email = "test@test.org"
- if err := ioutil.WriteFile(htpasswdPath, []byte(userpasswd), os.FileMode(0644)); err != nil {
+ if err := os.WriteFile(htpasswdPath, []byte(userpasswd), os.FileMode(0644)); err != nil {
return nil, err
}
htpasswd = fmt.Sprintf(`auth:
@@ -86,19 +84,18 @@ http:
return nil, err
}
if _, err := fmt.Fprintf(config, template, tmp, url, htpasswd); err != nil {
- os.RemoveAll(tmp)
return nil, err
}

- binary := binaryV2
+ var cmd *exec.Cmd
if schema1 {
- binary = binaryV2Schema1
+ cmd = exec.Command(binaryV2Schema1, confPath)
+ } else {
+ cmd = exec.Command(binaryV2, "serve", confPath)
}

- cmd := exec.Command(binary, confPath)
consumeAndLogOutputs(c, fmt.Sprintf("registry-%s", url), cmd)
if err := cmd.Start(); err != nil {
- os.RemoveAll(tmp)
if os.IsNotExist(err) {
c.Skip(err.Error())
}
@@ -107,7 +104,6 @@ http:
return &testRegistryV2{
cmd: cmd,
url: url,
- dir: tmp,
username: username,
password: password,
email: email,
@@ -126,7 +122,8 @@ func (t *testRegistryV2) Ping() error {
return nil
}

- func (t *testRegistryV2) Close() {
- t.cmd.Process.Kill()
- os.RemoveAll(t.dir)
+ func (t *testRegistryV2) tearDown(c *check.C) {
+ // It’s undocumented what Kill() returns if the process has terminated,
+ // so we couldn’t check just for that. This is running in a container anyway…
+ _ = t.cmd.Process.Kill()
}

@@ -3,7 +3,6 @@ package main
import (
"errors"
"fmt"
- "io/ioutil"
"os"
"os/exec"
"strings"
@@ -21,7 +20,6 @@ func init() {
}

type SigningSuite struct {
- gpgHome string
fingerprint string
}

@@ -40,25 +38,18 @@ func (s *SigningSuite) SetUpSuite(c *check.C) {
_, err := exec.LookPath(skopeoBinary)
c.Assert(err, check.IsNil)

- s.gpgHome, err = ioutil.TempDir("", "skopeo-gpg")
- c.Assert(err, check.IsNil)
- os.Setenv("GNUPGHOME", s.gpgHome)
+ gpgHome := c.MkDir()
+ os.Setenv("GNUPGHOME", gpgHome)

- runCommandWithInput(c, "Key-Type: RSA\nName-Real: Testing user\n%no-protection\n%commit\n", gpgBinary, "--homedir", s.gpgHome, "--batch", "--gen-key")
+ runCommandWithInput(c, "Key-Type: RSA\nName-Real: Testing user\n%no-protection\n%commit\n", gpgBinary, "--homedir", gpgHome, "--batch", "--gen-key")

- lines, err := exec.Command(gpgBinary, "--homedir", s.gpgHome, "--with-colons", "--no-permission-warning", "--fingerprint").Output()
+ lines, err := exec.Command(gpgBinary, "--homedir", gpgHome, "--with-colons", "--no-permission-warning", "--fingerprint").Output()
c.Assert(err, check.IsNil)
s.fingerprint, err = findFingerprint(lines)
c.Assert(err, check.IsNil)
}

func (s *SigningSuite) TearDownSuite(c *check.C) {
- if s.gpgHome != "" {
- err := os.RemoveAll(s.gpgHome)
- c.Assert(err, check.IsNil)
- }
- s.gpgHome = ""

os.Unsetenv("GNUPGHOME")
}

@@ -73,7 +64,7 @@ func (s *SigningSuite) TestSignVerifySmoke(c *check.C) {
manifestPath := "fixtures/image.manifest.json"
dockerReference := "testing/smoketest"

- sigOutput, err := ioutil.TempFile("", "sig")
+ sigOutput, err := os.CreateTemp("", "sig")
c.Assert(err, check.IsNil)
defer os.Remove(sigOutput.Name())
assertSkopeoSucceeds(c, "^$", "standalone-sign", "-o", sigOutput.Name(),

@@ -3,7 +3,7 @@ package main
import (
"context"
"fmt"
- "io/ioutil"
+ "io/fs"
"os"
"path"
"path/filepath"
@@ -40,7 +40,6 @@ func init() {
type SyncSuite struct {
cluster *openshiftCluster
registry *testRegistryV2
- gpgHome string
}

func (s *SyncSuite) SetUpSuite(c *check.C) {
@@ -74,10 +73,8 @@ func (s *SyncSuite) SetUpSuite(c *check.C) {
// FIXME: Set up TLS for the docker registry port instead of using "--tls-verify=false" all over the place.
s.registry = setupRegistryV2At(c, v2DockerRegistryURL, registryAuth, registrySchema1)

- gpgHome, err := ioutil.TempDir("", "skopeo-gpg")
- c.Assert(err, check.IsNil)
- s.gpgHome = gpgHome
- os.Setenv("GNUPGHOME", s.gpgHome)
+ gpgHome := c.MkDir()
+ os.Setenv("GNUPGHOME", gpgHome)

for _, key := range []string{"personal", "official"} {
batchInput := fmt.Sprintf("Key-Type: RSA\nName-Real: Test key - %s\nName-email: %s@example.com\n%%no-protection\n%%commit\n",
@@ -85,7 +82,7 @@ func (s *SyncSuite) SetUpSuite(c *check.C) {
runCommandWithInput(c, batchInput, gpgBinary, "--batch", "--gen-key")

out := combinedOutputOfCommand(c, gpgBinary, "--armor", "--export", fmt.Sprintf("%s@example.com", key))
- err := ioutil.WriteFile(filepath.Join(s.gpgHome, fmt.Sprintf("%s-pubkey.gpg", key)),
+ err := os.WriteFile(filepath.Join(gpgHome, fmt.Sprintf("%s-pubkey.gpg", key)),
[]byte(out), 0600)
c.Assert(err, check.IsNil)
}
@@ -96,21 +93,32 @@ func (s *SyncSuite) TearDownSuite(c *check.C) {
return
}

- if s.gpgHome != "" {
- os.RemoveAll(s.gpgHome)
- }
if s.registry != nil {
- s.registry.Close()
+ s.registry.tearDown(c)
}
if s.cluster != nil {
s.cluster.tearDown(c)
}
}

- func (s *SyncSuite) TestDocker2DirTagged(c *check.C) {
- tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
+ func assertNumberOfManifestsInSubdirs(c *check.C, dir string, expectedCount int) {
+ nManifests := 0
+ err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ if !d.IsDir() && d.Name() == "manifest.json" {
+ nManifests++
+ return filepath.SkipDir
+ }
+ return nil
+ })
+ c.Assert(err, check.IsNil)
- defer os.RemoveAll(tmpDir)
+ c.Assert(nManifests, check.Equals, expectedCount)
}

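A note on the new helper: returning filepath.SkipDir from a non-directory entry skips the remaining entries of that entry's parent directory, so each image directory contributes at most one manifest.json to the count. Hypothetical usage, assuming the suite's existing sync helpers and fixtures:

func (s *SyncSuite) TestExampleManifestCount(c *check.C) {
	dir := c.MkDir()
	// One tagged image synced => exactly one manifest.json below dir.
	assertSkopeoSucceeds(c, "", "sync", "--src", "docker", "--dest", "dir", pullableTaggedImage, dir)
	assertNumberOfManifestsInSubdirs(c, dir, 1)
}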
+ func (s *SyncSuite) TestDocker2DirTagged(c *check.C) {
+ tmpDir := c.MkDir()

// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableTaggedImage
@@ -136,9 +144,7 @@ func (s *SyncSuite) TestDocker2DirTagged(c *check.C) {
}

func (s *SyncSuite) TestDocker2DirTaggedAll(c *check.C) {
- tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(tmpDir)
+ tmpDir := c.MkDir()

// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableTaggedManifestList
@@ -163,6 +169,20 @@ func (s *SyncSuite) TestDocker2DirTaggedAll(c *check.C) {
c.Assert(out, check.Equals, "")
}

+ func (s *SyncSuite) TestPreserveDigests(c *check.C) {
+ tmpDir := c.MkDir()
+
+ // FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
+ image := pullableTaggedManifestList
+
+ // copy docker => dir
+ assertSkopeoSucceeds(c, "", "copy", "--all", "--preserve-digests", "docker://"+image, "dir:"+tmpDir)
+ _, err := os.Stat(path.Join(tmpDir, "manifest.json"))
+ c.Assert(err, check.IsNil)
+
+ assertSkopeoFails(c, ".*Instructed to preserve digests.*", "copy", "--all", "--preserve-digests", "--format=oci", "docker://"+image, "dir:"+tmpDir)
+ }

func (s *SyncSuite) TestScoped(c *check.C) {
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableTaggedImage
@@ -170,8 +190,7 @@ func (s *SyncSuite) TestScoped(c *check.C) {
c.Assert(err, check.IsNil)
imagePath := imageRef.DockerReference().String()

- dir1, err := ioutil.TempDir("", "skopeo-sync-test")
- c.Assert(err, check.IsNil)
+ dir1 := c.MkDir()
assertSkopeoSucceeds(c, "", "sync", "--src", "docker", "--dest", "dir", image, dir1)
_, err = os.Stat(path.Join(dir1, path.Base(imagePath), "manifest.json"))
c.Assert(err, check.IsNil)
@@ -179,8 +198,6 @@ func (s *SyncSuite) TestScoped(c *check.C) {
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "docker", "--dest", "dir", image, dir1)
_, err = os.Stat(path.Join(dir1, imagePath, "manifest.json"))
c.Assert(err, check.IsNil)

- os.RemoveAll(dir1)
}

func (s *SyncSuite) TestDirIsNotOverwritten(c *check.C) {
@@ -194,8 +211,7 @@ func (s *SyncSuite) TestDirIsNotOverwritten(c *check.C) {
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://"+image, "docker://"+path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())))

//sync upstream image to dir, not scoped
- dir1, err := ioutil.TempDir("", "skopeo-sync-test")
- c.Assert(err, check.IsNil)
+ dir1 := c.MkDir()
assertSkopeoSucceeds(c, "", "sync", "--src", "docker", "--dest", "dir", image, dir1)
_, err = os.Stat(path.Join(dir1, path.Base(imagePath), "manifest.json"))
c.Assert(err, check.IsNil)
@@ -210,14 +226,10 @@ func (s *SyncSuite) TestDirIsNotOverwritten(c *check.C) {
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())), dir1)
_, err = os.Stat(path.Join(dir1, imagePath, "manifest.json"))
c.Assert(err, check.IsNil)
- os.RemoveAll(dir1)
}

func (s *SyncSuite) TestDocker2DirUntagged(c *check.C) {

- tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(tmpDir)
+ tmpDir := c.MkDir()

// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableRepo
@@ -239,9 +251,7 @@ func (s *SyncSuite) TestDocker2DirUntagged(c *check.C) {
}

func (s *SyncSuite) TestYamlUntagged(c *check.C) {
- tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(tmpDir)
+ tmpDir := c.MkDir()
dir1 := path.Join(tmpDir, "dir1")

image := pullableRepo
@@ -262,7 +272,8 @@ func (s *SyncSuite) TestYamlUntagged(c *check.C) {

// sync to the local registry
yamlFile := path.Join(tmpDir, "registries.yaml")
- ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
+ err = os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
+ c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "docker", "--dest-tls-verify=false", yamlFile, v2DockerRegistryURL)
// sync back from local registry to a folder
os.Remove(yamlFile)
@@ -273,7 +284,8 @@ func (s *SyncSuite) TestYamlUntagged(c *check.C) {
%s: []
`, v2DockerRegistryURL, imagePath)

- ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
+ err = os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
+ c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)

sysCtx = types.SystemContext{
@@ -285,27 +297,11 @@ func (s *SyncSuite) TestYamlUntagged(c *check.C) {
c.Assert(err, check.IsNil)
c.Check(len(localTags), check.Not(check.Equals), 0)
c.Assert(len(localTags), check.Equals, len(tags))

- nManifests := 0
- //count the number of manifest.json in dir1
- err = filepath.Walk(dir1, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if !info.IsDir() && info.Name() == "manifest.json" {
- nManifests++
- return filepath.SkipDir
- }
- return nil
- })
- c.Assert(err, check.IsNil)
- c.Assert(nManifests, check.Equals, len(tags))
+ assertNumberOfManifestsInSubdirs(c, dir1, len(tags))
}

func (s *SyncSuite) TestYamlRegex2Dir(c *check.C) {
- tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(tmpDir)
+ tmpDir := c.MkDir()
dir1 := path.Join(tmpDir, "dir1")

yamlConfig := `
@@ -318,28 +314,14 @@ k8s.gcr.io:
c.Assert(nTags, check.Not(check.Equals), 0)

yamlFile := path.Join(tmpDir, "registries.yaml")
- ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
- assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
-
- nManifests := 0
- err = filepath.Walk(dir1, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if !info.IsDir() && info.Name() == "manifest.json" {
- nManifests++
- return filepath.SkipDir
- }
- return nil
- })
+ err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
c.Assert(err, check.IsNil)
- c.Assert(nManifests, check.Equals, nTags)
+ assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
+ assertNumberOfManifestsInSubdirs(c, dir1, nTags)
}

func (s *SyncSuite) TestYamlDigest2Dir(c *check.C) {
- tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(tmpDir)
+ tmpDir := c.MkDir()
dir1 := path.Join(tmpDir, "dir1")

yamlConfig := `
@@ -349,28 +331,14 @@ k8s.gcr.io:
- sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610
`
yamlFile := path.Join(tmpDir, "registries.yaml")
- ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
- assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
-
- nManifests := 0
- err = filepath.Walk(dir1, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if !info.IsDir() && info.Name() == "manifest.json" {
- nManifests++
- return filepath.SkipDir
- }
- return nil
- })
+ err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
c.Assert(err, check.IsNil)
- c.Assert(nManifests, check.Equals, 1)
+ assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
+ assertNumberOfManifestsInSubdirs(c, dir1, 1)
}

func (s *SyncSuite) TestYaml2Dir(c *check.C) {
- tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(tmpDir)
+ tmpDir := c.MkDir()
dir1 := path.Join(tmpDir, "dir1")

yamlConfig := `
@@ -401,29 +369,15 @@ quay.io:
c.Assert(nTags, check.Not(check.Equals), 0)

yamlFile := path.Join(tmpDir, "registries.yaml")
- ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
- assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
-
- nManifests := 0
- err = filepath.Walk(dir1, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if !info.IsDir() && info.Name() == "manifest.json" {
- nManifests++
- return filepath.SkipDir
- }
- return nil
- })
+ err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
c.Assert(err, check.IsNil)
- c.Assert(nManifests, check.Equals, nTags)
+ assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
+ assertNumberOfManifestsInSubdirs(c, dir1, nTags)
}

func (s *SyncSuite) TestYamlTLSVerify(c *check.C) {
const localRegURL = "docker://" + v2DockerRegistryURL + "/"
- tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(tmpDir)
+ tmpDir := c.MkDir()
dir1 := path.Join(tmpDir, "dir1")
image := pullableRepoWithLatestTag
tag := "latest"
@@ -465,7 +419,8 @@ func (s *SyncSuite) TestYamlTLSVerify(c *check.C) {
for _, cfg := range testCfg {
yamlConfig := fmt.Sprintf(yamlTemplate, v2DockerRegistryURL, cfg.tlsVerify, image, tag)
yamlFile := path.Join(tmpDir, "registries.yaml")
- ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
+ err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
+ c.Assert(err, check.IsNil)

cfg.checker(c, cfg.msg, "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
os.Remove(yamlFile)
@@ -475,9 +430,7 @@
}

func (s *SyncSuite) TestSyncManifestOutput(c *check.C) {
- tmpDir, err := ioutil.TempDir("", "sync-manifest-output")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(tmpDir)
+ tmpDir := c.MkDir()

destDir1 := filepath.Join(tmpDir, "dest1")
destDir2 := filepath.Join(tmpDir, "dest2")
@@ -497,9 +450,7 @@ func (s *SyncSuite) TestSyncManifestOutput(c *check.C) {
func (s *SyncSuite) TestDocker2DockerTagged(c *check.C) {
const localRegURL = "docker://" + v2DockerRegistryURL + "/"

- tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(tmpDir)
+ tmpDir := c.MkDir()

// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableTaggedImage
@@ -530,15 +481,13 @@ func (s *SyncSuite) TestDocker2DockerTagged(c *check.C) {
func (s *SyncSuite) TestDir2DockerTagged(c *check.C) {
const localRegURL = "docker://" + v2DockerRegistryURL + "/"

- tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(tmpDir)
+ tmpDir := c.MkDir()

// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableRepoWithLatestTag

dir1 := path.Join(tmpDir, "dir1")
- err = os.Mkdir(dir1, 0755)
+ err := os.Mkdir(dir1, 0755)
c.Assert(err, check.IsNil)
dir2 := path.Join(tmpDir, "dir2")
err = os.Mkdir(dir2, 0755)
@@ -570,9 +519,7 @@ func (s *SyncSuite) TestDir2DockerTagged(c *check.C) {
}

func (s *SyncSuite) TestFailsWithDir2Dir(c *check.C) {
|
||||
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
dir2 := path.Join(tmpDir, "dir2")
|
||||
@@ -582,9 +529,7 @@ func (s *SyncSuite) TestFailsWithDir2Dir(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestFailsNoSourceImages(c *check.C) {
|
||||
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
assertSkopeoFails(c, ".*No images to sync found in .*",
|
||||
"sync", "--scoped", "--dest-tls-verify=false", "--src", "dir", "--dest", "docker", tmpDir, v2DockerRegistryURL)
|
||||
@@ -596,9 +541,7 @@ func (s *SyncSuite) TestFailsNoSourceImages(c *check.C) {
|
||||
func (s *SyncSuite) TestFailsWithDockerSourceNoRegistry(c *check.C) {
|
||||
const regURL = "google.com/namespace/imagename"
|
||||
|
||||
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
//untagged
|
||||
assertSkopeoFails(c, ".*invalid status code from registry 404.*",
|
||||
@@ -611,9 +554,7 @@ func (s *SyncSuite) TestFailsWithDockerSourceNoRegistry(c *check.C) {
|
||||
|
||||
func (s *SyncSuite) TestFailsWithDockerSourceUnauthorized(c *check.C) {
|
||||
const repo = "privateimagenamethatshouldnotbepublic"
|
||||
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
//untagged
|
||||
assertSkopeoFails(c, ".*Registry disallows tag list retrieval.*",
|
||||
@@ -626,9 +567,7 @@ func (s *SyncSuite) TestFailsWithDockerSourceUnauthorized(c *check.C) {
|
||||
|
||||
func (s *SyncSuite) TestFailsWithDockerSourceNotExisting(c *check.C) {
|
||||
repo := path.Join(v2DockerRegistryURL, "imagedoesnotexist")
|
||||
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
//untagged
|
||||
assertSkopeoFails(c, ".*invalid status code from registry 404.*",
|
||||
@@ -641,9 +580,9 @@ func (s *SyncSuite) TestFailsWithDockerSourceNotExisting(c *check.C) {
|
||||
|
||||
func (s *SyncSuite) TestFailsWithDirSourceNotExisting(c *check.C) {
|
||||
// Make sure the dir does not exist!
|
||||
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
|
||||
c.Assert(err, check.IsNil)
|
||||
err = os.RemoveAll(tmpDir)
|
||||
tmpDir := c.MkDir()
|
||||
tmpDir = filepath.Join(tmpDir, "this-does-not-exist")
|
||||
err := os.RemoveAll(tmpDir)
|
||||
c.Assert(err, check.IsNil)
|
||||
_, err = os.Stat(path.Join(tmpDir))
|
||||
c.Check(os.IsNotExist(err), check.Equals, true)
|
||||
|
||||
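The assertNumberOfManifestsInSubdirs helper called throughout these tests replaces the inline filepath.Walk counting shown above. A minimal sketch of what such a helper could look like, reconstructed from that inline code — the signature is inferred from the call sites, so treat this as an illustration rather than the repository's actual implementation:

```go
// assertNumberOfManifestsInSubdirs walks dir and asserts that exactly
// `expected` manifest.json files are found, descending no further once a
// manifest has been seen in a given image subdirectory.
func assertNumberOfManifestsInSubdirs(c *check.C, dir string, expected int) {
	nManifests := 0
	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() && info.Name() == "manifest.json" {
			nManifests++
			return filepath.SkipDir // skip the rest of this image's directory
		}
		return nil
	})
	c.Assert(err, check.IsNil)
	c.Assert(nManifests, check.Equals, expected)
}
```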
@@ -3,8 +3,8 @@ package main
import (
"bytes"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"strings"
@@ -163,15 +163,15 @@ func modifyEnviron(env []string, name, value string) []string {
// fileFromFixture applies edits to inputPath and returns a path to the temporary file.
// Callers should defer os.Remove(the_returned_path)
func fileFromFixture(c *check.C, inputPath string, edits map[string]string) string {
contents, err := ioutil.ReadFile(inputPath)
contents, err := os.ReadFile(inputPath)
c.Assert(err, check.IsNil)
for template, value := range edits {
updated := bytes.Replace(contents, []byte(template), []byte(value), -1)
updated := bytes.ReplaceAll(contents, []byte(template), []byte(value))
c.Assert(bytes.Equal(updated, contents), check.Equals, false, check.Commentf("Replacing %s in %#v failed", template, string(contents))) // Verify that the template has matched something and we are not silently ignoring it.
contents = updated
}

file, err := ioutil.TempFile("", "policy.json")
file, err := os.CreateTemp("", "policy.json")
c.Assert(err, check.IsNil)
path := file.Name()

@@ -187,7 +187,7 @@ func fileFromFixture(c *check.C, inputPath string, edits map[string]string) stri
func runDecompressDirs(c *check.C, regexp string, args ...string) {
c.Logf("Running %s %s", decompressDirsBinary, strings.Join(args, " "))
for i, dir := range args {
m, err := ioutil.ReadFile(filepath.Join(dir, "manifest.json"))
m, err := os.ReadFile(filepath.Join(dir, "manifest.json"))
c.Assert(err, check.IsNil)
c.Logf("manifest %d before: %s", i+1, string(m))
}
@@ -197,7 +197,7 @@ func runDecompressDirs(c *check.C, regexp string, args ...string) {
if len(out) > 0 {
c.Logf("output: %s", out)
}
m, err := ioutil.ReadFile(filepath.Join(dir, "manifest.json"))
m, err := os.ReadFile(filepath.Join(dir, "manifest.json"))
c.Assert(err, check.IsNil)
c.Logf("manifest %d after: %s", i+1, string(m))
}
@@ -208,7 +208,7 @@ func runDecompressDirs(c *check.C, regexp string, args ...string) {

// Verify manifest in a dir: image at dir is expectedMIMEType.
func verifyManifestMIMEType(c *check.C, dir string, expectedMIMEType string) {
manifestBlob, err := ioutil.ReadFile(filepath.Join(dir, "manifest.json"))
manifestBlob, err := os.ReadFile(filepath.Join(dir, "manifest.json"))
c.Assert(err, check.IsNil)
mimeType := manifest.GuessMIMEType(manifestBlob)
c.Assert(mimeType, check.Equals, expectedMIMEType)
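For context, a typical call to fileFromFixture would look like the snippet below. The fixture path and the @keypath@ marker are hypothetical placeholders for illustration, not names taken from the repository:

```go
// Build a policy.json for this test by substituting a per-test value into
// the fixture template; remove the generated file when the test ends.
policyPath := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keypath@": keyPath})
defer os.Remove(policyPath)
```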
132 skopeo.spec.rpkg Normal file
@@ -0,0 +1,132 @@
# For automatic rebuilds in COPR

# The following tag is to get correct syntax highlighting for this file in vim text editor
# vim: syntax=spec

# Any additional comments should go below this line or else syntax highlighting
# may not work.

# CAUTION: This is not a replacement for RPMs provided by your distro.
# Only intended to build and test the latest unreleased changes.

%global gomodulesmode GO111MODULE=on
%global with_debug 1

%if 0%{?with_debug}
%global _find_debuginfo_dwz_opts %{nil}
%global _dwz_low_mem_die_limit 0
%else
%global debug_package %{nil}
%endif

%if ! 0%{?gobuild:1}
%define gobuild(o:) go build -buildmode pie -compiler gc -tags="rpm_crashtraceback ${BUILDTAGS:-}" -ldflags "${LDFLAGS:-} -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n') -extldflags '-Wl,-z,relro -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld '" -a -v -x %{?**};
%endif

Name: {{{ git_dir_name }}}
Epoch: 101
Version: {{{ git_dir_version }}}
Release: 1%{?dist}
Summary: Inspect container images and repositories on registries
License: ASL 2.0
URL: https://github.com/containers/skopeo
VCS: {{{ git_dir_vcs }}}
Source: {{{ git_dir_pack }}}
%if 0%{?fedora} && ! 0%{?rhel}
BuildRequires: btrfs-progs-devel
%endif
BuildRequires: golang >= 1.16.6
BuildRequires: glib2-devel
BuildRequires: git-core
BuildRequires: go-md2man
%if 0%{?fedora} || 0%{?rhel} >= 9
BuildRequires: go-rpm-macros
%endif
BuildRequires: pkgconfig(devmapper)
BuildRequires: gpgme-devel
BuildRequires: libassuan-devel
BuildRequires: pkgconfig
BuildRequires: make
BuildRequires: ostree-devel
%if 0%{?fedora} <= 35
Requires: containers-common >= 4:1-39
%else
Requires: containers-common >= 4:1-46
%endif

%description
Command line utility to inspect images and repositories directly on Docker
registries without the need to pull them.

%package tests
Summary: Tests for %{name}
Requires: %{name} = %{epoch}:%{version}-%{release}
Requires: bats
Requires: gnupg
Requires: jq
Requires: podman
Requires: httpd-tools
Requires: openssl
Requires: fakeroot
Requires: squashfs-tools

%description tests
%{summary}

This package contains system tests for %{name}

%prep
{{{ git_dir_setup_macro }}}

sed -i 's/install-binary: bin\/skopeo/install-binary:/' Makefile

# This will invoke `make` command in the directory with the extracted sources.
%build
%set_build_flags
export CGO_CFLAGS=$CFLAGS
# These extra flags present in $CFLAGS have been skipped for now as they break the build
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-flto=auto//g')
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-Wp,D_GLIBCXX_ASSERTIONS//g')
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-specs=\/usr\/lib\/rpm\/redhat\/redhat-annobin-cc1//g')

%ifarch x86_64
export CGO_CFLAGS+=" -m64 -mtune=generic -fcf-protection=full"
%endif

LDFLAGS=""

export BUILDTAGS="$(hack/libdm_tag.sh)"
%if 0%{?rhel}
export BUILDTAGS="$BUILDTAGS exclude_graphdriver_btrfs btrfs_noversion"
%endif

%gobuild -o bin/%{name} ./cmd/%{name}

%install
%{__make} PREFIX=%{buildroot}%{_prefix} install-binary install-docs install-completions

# system tests
install -d -p %{buildroot}/%{_datadir}/%{name}/test/system
cp -pav systemtest/* %{buildroot}/%{_datadir}/%{name}/test/system/

%files
%license LICENSE
%doc README.md
%{_bindir}/%{name}
%{_mandir}/man1/%{name}*
%dir %{_datadir}/bash-completion
%dir %{_datadir}/bash-completion/completions
%{_datadir}/bash-completion/completions/%{name}
%dir %{_datadir}/fish
%dir %{_datadir}/fish/vendor_completions.d
%{_datadir}/fish/vendor_completions.d/%{name}.fish
%dir %{_datadir}/zsh
%dir %{_datadir}/zsh/site-functions
%{_datadir}/zsh/site-functions/_%{name}

%files tests
%license LICENSE
%{_datadir}/%{name}/test

%changelog
{{{ git_dir_changelog }}}
@@ -27,11 +27,20 @@ load helpers
# Now run inspect locally
run_skopeo inspect dir:$workdir
inspect_local=$output
run_skopeo inspect --raw dir:$workdir
inspect_local_raw=$output
config_digest=$(jq -r '.config.digest' <<<"$inspect_local_raw")

# Each SHA-named file must be listed in the output of 'inspect'
# Each SHA-named layer file (but not the config) must be listed in the output of 'inspect'.
# In all existing versions of Skopeo (with 1.6 being the current as of this comment),
# the output of 'inspect' lists layer digests,
# but not the digest of the config blob ($config_digest), if any.
layers=$(jq -r '.Layers' <<<"$inspect_local")
for sha in $(find $workdir -type f | xargs -l1 basename | egrep '^[0-9a-f]{64}$'); do
expect_output --from="$inspect_local" --substring "sha256:$sha" \
"Locally-extracted SHA file is present in 'inspect'"
if [ "sha256:$sha" != "$config_digest" ]; then
expect_output --from="$layers" --substring "sha256:$sha" \
"Locally-extracted SHA file is present in 'inspect'"
fi
done

# Simple sanity check on 'inspect' output.
@@ -50,7 +50,7 @@ function setup() {

local dir=$TESTDIR/dir

run_skopeo copy --dest-compress --dest-compress-format=zstd $remote_image oci:$dir:latest
run_skopeo copy --dest-compress-format=zstd $remote_image oci:$dir:latest

# zstd magic number
local magic=$(printf "\x28\xb5\x2f\xfd")
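The test above greps the destination blobs for the zstd frame header: every zstd-compressed layer begins with the 4-byte magic 28 b5 2f fd (0xFD2FB528 stored little-endian). The same check expressed as a small Go sketch, kept separate from skopeo's own code:

```go
package main

import (
	"bytes"
	"fmt"
	"os"
)

// zstdMagic is the zstd frame header, 0xFD2FB528 in little-endian byte order.
var zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}

// isZstd reports whether the blob at path starts with the zstd magic number.
func isZstd(path string) (bool, error) {
	blob, err := os.ReadFile(path)
	if err != nil {
		return false, err
	}
	return bytes.HasPrefix(blob, zstdMagic), nil
}

func main() {
	ok, err := isZstd(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("zstd:", ok)
}
```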
@@ -125,6 +125,10 @@ function setup() {
run podman --root $TESTDIR/podmanroot images
expect_output --substring "mine"

# rootless cleanup needs to be done with unshare due to subuids
if [[ "$(id -u)" != "0" ]]; then
run podman unshare rm -rf $TESTDIR/podmanroot
fi
}

# shared blob directory
@@ -144,6 +148,16 @@ function setup() {
diff -urN $shareddir $dir2/blobs
}

@test "copy: sif image" {
type -path fakeroot || skip "'fakeroot' tool not available"

local localimg=dir:$TESTDIR/dir

run_skopeo copy sif:${TEST_SOURCE_DIR}/testdata/busybox_latest.sif $localimg
run_skopeo inspect $localimg --format "{{.Architecture}}"
expect_output "amd64"
}

teardown() {
podman rm -f reg

@@ -12,6 +12,13 @@ function setup() {
export GNUPGHOME=$TESTDIR/skopeo-gpg
mkdir --mode=0700 $GNUPGHOME

PASSPHRASE_FILE=$TESTDIR/passphrase-file
passphrase=$(random_string 20)
echo $passphrase > $PASSPHRASE_FILE

PASSPHRASE_FILE_WRONG=$TESTDIR/passphrase-file-wrong
echo $(random_string 10) > $PASSPHRASE_FILE_WRONG

# gpg on f30 needs this, otherwise:
# gpg: agent_genkey failed: Inappropriate ioctl for device
# ...but gpg on f29 (and, probably, Ubuntu) doesn't grok this
@@ -21,7 +28,7 @@ function setup() {
fi

for k in alice bob;do
gpg --batch $GPGOPTS --gen-key --passphrase '' <<END_GPG
gpg --batch $GPGOPTS --gen-key --passphrase $passphrase <<END_GPG
Key-Type: RSA
Name-Real: Test key - $k
Name-email: $k@test.redhat.com
@@ -81,8 +88,18 @@ END_POLICY_JSON
start_registry reg
}

function kill_gpg_agent {
# Kill the running gpg-agent to drop unlocked keys. This allows for testing
# handling of invalid passphrases.
run gpgconf --kill gpg-agent
if [ "$status" -ne 0 ]; then
die "could not restart gpg-agent: $output"
fi
}

@test "signing" {
run_skopeo '?' standalone-sign /dev/null busybox alice@test.redhat.com -o /dev/null
kill_gpg_agent
run_skopeo '?' standalone-sign /dev/null busybox alice@test.redhat.com -o /dev/null --passphrase-file $PASSPHRASE_FILE
if [[ "$output" =~ 'signing is not supported' ]]; then
skip "skopeo built without support for creating signatures"
return 1
@@ -100,7 +117,8 @@ END_POLICY_JSON
while read path sig comments; do
local sign_opt=
if [[ $sig != '-' ]]; then
sign_opt="--sign-by=${sig}@test.redhat.com"
kill_gpg_agent
sign_opt=" --sign-passphrase-file=$PASSPHRASE_FILE --sign-by=${sig}@test.redhat.com"
fi
run_skopeo --registries.d $REGISTRIES_D \
copy --dest-tls-verify=false \
@@ -144,7 +162,8 @@ END_TESTS
}

@test "signing: remove signature" {
run_skopeo '?' standalone-sign /dev/null busybox alice@test.redhat.com -o /dev/null
kill_gpg_agent
run_skopeo '?' standalone-sign /dev/null busybox alice@test.redhat.com -o /dev/null --passphrase-file $PASSPHRASE_FILE
if [[ "$output" =~ 'signing is not supported' ]]; then
skip "skopeo built without support for creating signatures"
return 1
@@ -157,11 +176,24 @@ END_TESTS
run_skopeo copy docker://quay.io/libpod/busybox:latest \
dir:$TESTDIR/busybox
# Push a signed image
kill_gpg_agent
run_skopeo --registries.d $REGISTRIES_D \
copy --dest-tls-verify=false \
--sign-by=alice@test.redhat.com \
--sign-passphrase-file $PASSPHRASE_FILE \
dir:$TESTDIR/busybox \
docker://localhost:5000/myns/alice:signed

# Wrong passphrase file
kill_gpg_agent
run_skopeo 1 --registries.d $REGISTRIES_D \
copy --dest-tls-verify=false \
--sign-by=alice@test.redhat.com \
--sign-passphrase-file $PASSPHRASE_FILE_WRONG \
dir:$TESTDIR/busybox \
docker://localhost:5000/myns/alice:signed
expect_output --substring "Bad passphrase"

# Fetch the image with signature
run_skopeo --registries.d $REGISTRIES_D \
--policy $POLICY_JSON \
@@ -180,7 +212,8 @@ END_TESTS
}

@test "signing: standalone" {
run_skopeo '?' standalone-sign /dev/null busybox alice@test.redhat.com -o /dev/null
kill_gpg_agent
run_skopeo '?' standalone-sign /dev/null busybox alice@test.redhat.com -o /dev/null --passphrase-file $PASSPHRASE_FILE
if [[ "$output" =~ 'signing is not supported' ]]; then
skip "skopeo built without support for creating signatures"
return 1
@@ -196,7 +229,9 @@ END_TESTS
docker://localhost:5000/busybox:latest \
dir:$TESTDIR/busybox
# Standalone sign
kill_gpg_agent
run_skopeo standalone-sign -o $TESTDIR/busybox.signature \
--passphrase-file $PASSPHRASE_FILE \
$TESTDIR/busybox/manifest.json \
localhost:5000/busybox:latest \
alice@test.redhat.com

28 systemtest/070-list-tags.bats Normal file
@@ -0,0 +1,28 @@
#!/usr/bin/env bats
#
# list-tags tests
#

load helpers

# list from registry
@test "list-tags: remote repository on a registry" {
local remote_image=quay.io/libpod/alpine_labels

run_skopeo list-tags "docker://${remote_image}"
expect_output --substring "quay.io/libpod/alpine_labels"
expect_output --substring "latest"
}

# list from a local docker-archive file
@test "list-tags: from a docker-archive file" {
local file_name=${TEST_SOURCE_DIR}/testdata/docker-two-images.tar.xz

run_skopeo list-tags docker-archive:$file_name
expect_output --substring "example.com/empty:latest"
expect_output --substring "example.com/empty/but:different"

}

# vim: filetype=sh
26 systemtest/080-sync.bats Normal file
@@ -0,0 +1,26 @@
#!/usr/bin/env bats
#
# Sync tests
#

load helpers

function setup() {
standard_setup
}

@test "sync: --dry-run" {
local remote_image=quay.io/libpod/busybox:latest
local dir=$TESTDIR/dir

run_skopeo sync --dry-run --src docker --dest dir --scoped $remote_image $dir
expect_output --substring "Would have copied image"
expect_output --substring "from=\"docker://${remote_image}\" to=\"dir:${dir}/${remote_image}\""
expect_output --substring "Would have synced 1 images from 1 sources"
}

teardown() {
standard_teardown
}

# vim: filetype=sh
@@ -1,6 +1,10 @@
#!/bin/bash

SKOPEO_BINARY=${SKOPEO_BINARY:-$(dirname ${BASH_SOURCE})/../skopeo}
# Directory containing system test sources
TEST_SOURCE_DIR=${TEST_SOURCE_DIR:-$(dirname ${BASH_SOURCE})}

# Skopeo executable
SKOPEO_BINARY=${SKOPEO_BINARY:-${TEST_SOURCE_DIR}/../bin/skopeo}

# Default timeout for a skopeo command.
SKOPEO_TIMEOUT=${SKOPEO_TIMEOUT:-300}
BIN systemtest/testdata/busybox_latest.sif vendored Executable file
Binary file not shown.
BIN systemtest/testdata/docker-two-images.tar.xz vendored Normal file
Binary file not shown.
2 vendor/github.com/BurntSushi/toml/.gitignore generated vendored
@@ -1,2 +1,2 @@
toml.test
/toml.test
/toml-test

1 vendor/github.com/BurntSushi/toml/COMPATIBLE generated vendored
@@ -1 +0,0 @@
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
210 vendor/github.com/BurntSushi/toml/README.md generated vendored
@@ -1,10 +1,5 @@
## TOML parser and encoder for Go with reflection

TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)
reflection interface similar to Go's standard library `json` and `xml` packages.

Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).

@@ -14,28 +9,18 @@ See the [releases page](https://github.com/BurntSushi/toml/releases) for a
changelog; this information is also in the git tag annotations (e.g. `git show
v0.4.0`).

This library requires Go 1.13 or newer; install it with:
This library requires Go 1.13 or newer; add it to your go.mod with:

$ go get github.com/BurntSushi/toml
% go get github.com/BurntSushi/toml@latest

It also comes with a TOML validator CLI tool:

$ go get github.com/BurntSushi/toml/cmd/tomlv
$ tomlv some-toml-file.toml

### Testing

This package passes all tests in
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
and the encoder.
% go install github.com/BurntSushi/toml/cmd/tomlv@latest
% tomlv some-toml-file.toml

### Examples

This package works similarly to how the Go standard library handles XML and
JSON. Namely, data is loaded into Go values via reflection.

For the simplest example, consider some TOML file as just a list of keys
and values:
For the simplest example, consider some TOML file as just a list of keys and
values:

```toml
Age = 25
@@ -45,7 +30,7 @@ Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```

Which could be defined in Go as:
Which can be decoded with:

```go
type Config struct {
@@ -53,21 +38,15 @@ type Config struct {
Cats []string
Pi float64
Perfection []int
DOB time.Time // requires `import time`
DOB time.Time
}
```

And then decoded with:

```go
var conf Config
if _, err := toml.Decode(tomlData, &conf); err != nil {
// handle error
}
_, err := toml.Decode(tomlData, &conf)
```

You can also use struct tags if your struct field name doesn't map to a TOML
key value directly:
You can also use struct tags if your struct field name doesn't map to a TOML key
value directly:

```toml
some_key_NAME = "wat"
@@ -75,146 +54,67 @@ some_key_NAME = "wat"

```go
type TOML struct {
ObscureKey string `toml:"some_key_NAME"`
ObscureKey string `toml:"some_key_NAME"`
}
```

Beware that like other most other decoders **only exported fields** are
considered when encoding and decoding; private fields are silently ignored.
Beware that like other decoders **only exported fields** are considered when
encoding and decoding; private fields are silently ignored.

### Using the `encoding.TextUnmarshaler` interface

Here's an example that automatically parses duration strings into
`time.Duration` values:
### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
Here's an example that automatically parses values in a `mail.Address`:

```toml
[[song]]
name = "Thunder Road"
duration = "4m49s"

[[song]]
name = "Stairway to Heaven"
duration = "8m03s"
contacts = [
"Donald Duck <donald@duckburg.com>",
"Scrooge McDuck <scrooge@duckburg.com>",
]
```

Which can be decoded with:
Can be decoded with:

```go
type song struct {
Name string
Duration duration
}
type songs struct {
Song []song
}
var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
log.Fatal(err)
// Create address type which satisfies the encoding.TextUnmarshaler interface.
type address struct {
*mail.Address
}

for _, s := range favorites.Song {
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```

And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface:

```go
type duration struct {
time.Duration
}

func (d *duration) UnmarshalText(text []byte) error {
func (a *address) UnmarshalText(text []byte) error {
var err error
d.Duration, err = time.ParseDuration(string(text))
a.Address, err = mail.ParseAddress(string(text))
return err
}

// Decode it.
func decode() {
blob := `
contacts = [
"Donald Duck <donald@duckburg.com>",
"Scrooge McDuck <scrooge@duckburg.com>",
]
`

var contacts struct {
Contacts []address
}

_, err := toml.Decode(blob, &contacts)
if err != nil {
log.Fatal(err)
}

for _, c := range contacts.Contacts {
fmt.Printf("%#v\n", c.Address)
}

// Output:
// &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"}
// &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"}
}
```

To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
a similar way.

### More complex usage

Here's an example of how to load the example from the official spec page:

```toml
# This is a TOML document. Boom.

title = "TOML Example"

[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?

[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true

[servers]

# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"

[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"

[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it

# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]
```

And the corresponding Go types are:

```go
type tomlConfig struct {
Title string
Owner ownerInfo
DB database `toml:"database"`
Servers map[string]server
Clients clients
}

type ownerInfo struct {
Name string
Org string `toml:"organization"`
Bio string
DOB time.Time
}

type database struct {
Server string
Ports []int
ConnMax int `toml:"connection_max"`
Enabled bool
}

type server struct {
IP string
DC string
}

type clients struct {
Data [][]interface{}
Hosts []string
}
```

Note that a case insensitive match will be tried if an exact match can't be
found.

A working example of the above can be found in `_examples/example.{go,toml}`.

See the [`_example/`](/_example) directory for a more complex example.

379 vendor/github.com/BurntSushi/toml/decode.go generated vendored
@@ -1,13 +1,16 @@
package toml

import (
"bytes"
"encoding"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"reflect"
"strconv"
"strings"
"time"
)
@@ -18,12 +21,30 @@ type Unmarshaler interface {
UnmarshalTOML(interface{}) error
}

// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
func Unmarshal(p []byte, v interface{}) error {
_, err := Decode(string(p), v)
// Unmarshal decodes the contents of `data` in TOML format into a pointer `v`.
func Unmarshal(data []byte, v interface{}) error {
_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
return err
}

// Decode the TOML data in to the pointer v.
//
// See the documentation on Decoder for a description of the decoding process.
func Decode(data string, v interface{}) (MetaData, error) {
return NewDecoder(strings.NewReader(data)).Decode(v)
}

// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at path and decode it for you.
func DecodeFile(path string, v interface{}) (MetaData, error) {
fp, err := os.Open(path)
if err != nil {
return MetaData{}, err
}
defer fp.Close()
return NewDecoder(fp).Decode(v)
}

// Primitive is a TOML value that hasn't been decoded into a Go value.
//
// This type can be used for any value, which will cause decoding to be delayed.
@@ -40,22 +61,12 @@ type Primitive struct {
context Key
}

// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
md.context = primValue.context
defer func() { md.context = nil }()
return md.unify(primValue.undecoded, rvalue(v))
}
// The significand precision for float32 and float64 is 24 and 53 bits; this is
// the range a natural number can be stored in a float without loss of data.
const (
maxSafeFloat32Int = 16777215 // 2^24-1
maxSafeFloat64Int = int64(9007199254740991) // 2^53-1
)

// Decoder decodes TOML data.
//
@@ -67,6 +78,9 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
// in the local timezone.
//
// time.Duration types are treated as nanoseconds if the TOML value is an
// integer, or they're parsed with time.ParseDuration() if they're strings.
//
// All other TOML types (float, string, int, bool and array) correspond to the
// obvious Go types.
//
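The time.Duration behavior documented in the new doc comment above can be exercised like this — a sketch against the post-upgrade toml API; both forms decode to the same value:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/BurntSushi/toml"
)

type config struct {
	// A TOML string is parsed with time.ParseDuration;
	// a TOML integer is interpreted as nanoseconds.
	Timeout time.Duration
}

func main() {
	var a, b config
	if _, err := toml.Decode(`timeout = "1m30s"`, &a); err != nil {
		log.Fatal(err)
	}
	if _, err := toml.Decode(`timeout = 90000000000`, &b); err != nil {
		log.Fatal(err)
	}
	fmt.Println(a.Timeout, b.Timeout) // 1m30s 1m30s
}
```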
@@ -74,7 +88,7 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
// interface, in which case any primitive TOML value (floats, strings, integers,
// booleans, datetimes) will be converted to a []byte and given to the value's
// UnmarshalText method. See the Unmarshaler example for a demonstration with
// time duration strings.
// email addresses.
//
// Key mapping
//
@@ -100,18 +114,39 @@ func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}

var (
unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem()
)

// Decode TOML data in to the pointer `v`.
func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
s := "%q"
if reflect.TypeOf(v) == nil {
s = "%v"
}

return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v))
}
if rv.IsNil() {
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v))
}

// TODO: have parser should read from io.Reader? Or at the very least, make
// it read from []byte rather than string
// Check if this is a supported type: struct, map, interface{}, or something
// that implements UnmarshalTOML or UnmarshalText.
rv = indirect(rv)
rt := rv.Type()
if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
!(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) &&
!rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) {
return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt)
}

// TODO: parser should read from io.Reader? Or at the very least, make it
// read from []byte rather than string
data, err := ioutil.ReadAll(dec.r)
if err != nil {
return MetaData{}, err
@@ -121,29 +156,33 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
if err != nil {
return MetaData{}, err
}

md := MetaData{
p.mapping, p.types, p.ordered,
make(map[string]bool, len(p.ordered)), nil,
mapping: p.mapping,
keyInfo: p.keyInfo,
keys: p.ordered,
decoded: make(map[string]struct{}, len(p.ordered)),
context: nil,
data: data,
}
return md, md.unify(p.mapping, indirect(rv))
return md, md.unify(p.mapping, rv)
}

// Decode the TOML data in to the pointer v.
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// See the documentation on Decoder for a description of the decoding process.
func Decode(data string, v interface{}) (MetaData, error) {
return NewDecoder(strings.NewReader(data)).Decode(v)
}

// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at path and decode it for you.
func DecodeFile(path string, v interface{}) (MetaData, error) {
fp, err := os.Open(path)
if err != nil {
return MetaData{}, err
}
defer fp.Close()
return NewDecoder(fp).Decode(v)
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
md.context = primValue.context
defer func() { md.context = nil }()
return md.unify(primValue.undecoded, rvalue(v))
}

// unify performs a sort of type unification based on the structure of `rv`,
@@ -154,7 +193,7 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
// Special case. Look for a `Primitive` value.
// TODO: #76 would make this superfluous after implemented.
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
if rv.Type() == primitiveType {
// Save the undecoded data and the key context into the primitive
// value.
context := make(Key, len(md.context))
@@ -166,17 +205,14 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
return nil
}

// Special case. Unmarshaler Interface support.
if rv.CanAddr() {
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
return v.UnmarshalTOML(data)
}
rvi := rv.Interface()
if v, ok := rvi.(Unmarshaler); ok {
return v.UnmarshalTOML(data)
}

// Special case. Look for a value satisfying the TextUnmarshaler interface.
if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
if v, ok := rvi.(encoding.TextUnmarshaler); ok {
return md.unifyText(data, v)
}

// TODO:
// The behavior here is incorrect whenever a Go type satisfies the
// encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
@@ -187,7 +223,6 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {

k := rv.Kind()

// laziness
if k >= reflect.Int && k <= reflect.Uint64 {
return md.unifyInt(data, rv)
}
@@ -213,17 +248,14 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
case reflect.Bool:
return md.unifyBool(data, rv)
case reflect.Interface:
// we only support empty interfaces.
if rv.NumMethod() > 0 {
return e("unsupported type %s", rv.Type())
if rv.NumMethod() > 0 { // Only empty interfaces are supported.
return md.e("unsupported type %s", rv.Type())
}
return md.unifyAnything(data, rv)
case reflect.Float32:
fallthrough
case reflect.Float64:
case reflect.Float32, reflect.Float64:
return md.unifyFloat64(data, rv)
}
return e("unsupported type %s", rv.Kind())
return md.e("unsupported type %s", rv.Kind())
}

func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
@@ -232,7 +264,7 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
if mapping == nil {
return nil
}
return e("type mismatch for %s: expected table but found %T",
return md.e("type mismatch for %s: expected table but found %T",
rv.Type().String(), mapping)
}

@@ -254,17 +286,18 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
for _, i := range f.index {
subv = indirect(subv.Field(i))
}

if isUnifiable(subv) {
md.decoded[md.context.add(key).String()] = true
md.decoded[md.context.add(key).String()] = struct{}{}
md.context = append(md.context, key)
if err := md.unify(datum, subv); err != nil {

err := md.unify(datum, subv)
if err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
} else if f.name != "" {
// Bad user! No soup for you!
return e("cannot write unexported field %s.%s",
rv.Type().String(), f.name)
return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name)
}
}
}
@@ -272,10 +305,10 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
}

func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
if k := rv.Type().Key().Kind(); k != reflect.String {
return fmt.Errorf(
"toml: cannot decode to a map with non-string key type (%s in %q)",
k, rv.Type())
keyType := rv.Type().Key().Kind()
if keyType != reflect.String && keyType != reflect.Interface {
return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)",
keyType, rv.Type())
}

tmap, ok := mapping.(map[string]interface{})
@@ -283,23 +316,32 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
if tmap == nil {
return nil
}
return badtype("map", mapping)
return md.badtype("map", mapping)
}
if rv.IsNil() {
rv.Set(reflect.MakeMap(rv.Type()))
}
for k, v := range tmap {
md.decoded[md.context.add(k).String()] = true
md.decoded[md.context.add(k).String()] = struct{}{}
md.context = append(md.context, k)

rvkey := indirect(reflect.New(rv.Type().Key()))
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
if err := md.unify(v, rvval); err != nil {

err := md.unify(v, indirect(rvval))
if err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]

rvkey.SetString(k)
rvkey := indirect(reflect.New(rv.Type().Key()))

switch keyType {
case reflect.Interface:
rvkey.Set(reflect.ValueOf(k))
case reflect.String:
rvkey.SetString(k)
}

rv.SetMapIndex(rvkey, rvval)
}
return nil
@@ -311,10 +353,10 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
return md.badtype("slice", data)
}
if l := datav.Len(); l != rv.Len() {
return e("expected array length %d; got TOML array of length %d", rv.Len(), l)
return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l)
}
return md.unifySliceArray(datav, rv)
}
@@ -325,7 +367,7 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
return md.badtype("slice", data)
}
n := datav.Len()
if rv.IsNil() || rv.Cap() < n {
@@ -346,26 +388,35 @@ func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
return nil
}

func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
if _, ok := data.(time.Time); ok {
rv.Set(reflect.ValueOf(data))
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
_, ok := rv.Interface().(json.Number)
if ok {
if i, ok := data.(int64); ok {
rv.SetString(strconv.FormatInt(i, 10))
} else if f, ok := data.(float64); ok {
rv.SetString(strconv.FormatFloat(f, 'f', -1, 64))
} else {
return md.badtype("string", data)
}
return nil
}
return badtype("time.Time", data)
}

func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
if s, ok := data.(string); ok {
rv.SetString(s)
return nil
}
return badtype("string", data)
return md.badtype("string", data)
}

func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
rvk := rv.Kind()

if num, ok := data.(float64); ok {
switch rv.Kind() {
switch rvk {
case reflect.Float32:
if num < -math.MaxFloat32 || num > math.MaxFloat32 {
return md.parseErr(errParseRange{i: num, size: rvk.String()})
}
fallthrough
case reflect.Float64:
rv.SetFloat(num)
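The new json.Number branch in unifyString above means TOML numbers can now be captured without committing to a concrete Go numeric type. A brief sketch of how a caller could use it (the struct and key are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type price struct {
	// json.Number defers the int-versus-float choice to the caller,
	// which can later call Int64() or Float64() as appropriate.
	Amount json.Number
}

func main() {
	var p price
	if _, err := toml.Decode(`amount = 19.99`, &p); err != nil {
		log.Fatal(err)
	}
	fmt.Println(p.Amount.String()) // 19.99
}
```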
@@ -374,54 +425,60 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
}
return nil
}
return badtype("float", data)

if num, ok := data.(int64); ok {
if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) ||
(rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) {
return md.parseErr(errParseRange{i: num, size: rvk.String()})
}
rv.SetFloat(float64(num))
return nil
}

return md.badtype("float", data)
}

func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
if num, ok := data.(int64); ok {
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
switch rv.Kind() {
case reflect.Int, reflect.Int64:
// No bounds checking necessary.
case reflect.Int8:
if num < math.MinInt8 || num > math.MaxInt8 {
return e("value %d is out of range for int8", num)
}
case reflect.Int16:
if num < math.MinInt16 || num > math.MaxInt16 {
return e("value %d is out of range for int16", num)
}
case reflect.Int32:
if num < math.MinInt32 || num > math.MaxInt32 {
return e("value %d is out of range for int32", num)
}
_, ok := rv.Interface().(time.Duration)
if ok {
// Parse as string duration, and fall back to regular integer parsing
// (as nanosecond) if this is not a string.
if s, ok := data.(string); ok {
dur, err := time.ParseDuration(s)
if err != nil {
return md.parseErr(errParseDuration{s})
}
rv.SetInt(num)
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
unum := uint64(num)
switch rv.Kind() {
case reflect.Uint, reflect.Uint64:
// No bounds checking necessary.
case reflect.Uint8:
if num < 0 || unum > math.MaxUint8 {
return e("value %d is out of range for uint8", num)
}
case reflect.Uint16:
if num < 0 || unum > math.MaxUint16 {
return e("value %d is out of range for uint16", num)
}
case reflect.Uint32:
if num < 0 || unum > math.MaxUint32 {
return e("value %d is out of range for uint32", num)
}
}
rv.SetUint(unum)
} else {
panic("unreachable")
rv.SetInt(int64(dur))
return nil
}
return nil
}
return badtype("integer", data)

num, ok := data.(int64)
if !ok {
return md.badtype("integer", data)
}

rvk := rv.Kind()
switch {
case rvk >= reflect.Int && rvk <= reflect.Int64:
if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) ||
(rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) ||
(rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) {
return md.parseErr(errParseRange{i: num, size: rvk.String()})
}
rv.SetInt(num)
case rvk >= reflect.Uint && rvk <= reflect.Uint64:
unum := uint64(num)
if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) ||
rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) ||
rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) {
return md.parseErr(errParseRange{i: num, size: rvk.String()})
}
rv.SetUint(unum)
default:
panic("unreachable")
}
return nil
}

func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
@@ -429,7 +486,7 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
rv.SetBool(b)
return nil
}
return badtype("boolean", data)
return md.badtype("boolean", data)
}

func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
@@ -440,7 +497,13 @@ func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
case TextMarshaler:
case Marshaler:
text, err := sdata.MarshalTOML()
if err != nil {
return err
}
s = string(text)
case encoding.TextMarshaler:
text, err := sdata.MarshalText()
if err != nil {
return err
@@ -457,7 +520,7 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
case float64:
s = fmt.Sprintf("%f", sdata)
default:
return badtype("primitive (string-like)", data)
return md.badtype("primitive (string-like)", data)
}
if err := v.UnmarshalText([]byte(s)); err != nil {
return err
@@ -465,22 +528,54 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
return nil
}

func (md *MetaData) badtype(dst string, data interface{}) error {
return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst)
}

func (md *MetaData) parseErr(err error) error {
k := md.context.String()
return ParseError{
LastKey: k,
Position: md.keyInfo[k].pos,
Line: md.keyInfo[k].pos.Line,
err: err,
input: string(md.data),
}
}

func (md *MetaData) e(format string, args ...interface{}) error {
f := "toml: "
if len(md.context) > 0 {
f = fmt.Sprintf("toml: (last key %q): ", md.context)
p := md.keyInfo[md.context.String()].pos
if p.Line > 0 {
f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context)
}
}
return fmt.Errorf(f+format, args...)
}

// rvalue returns a reflect.Value of `v`. All pointers are resolved.
func rvalue(v interface{}) reflect.Value {
return indirect(reflect.ValueOf(v))
}

// indirect returns the value pointed to by a pointer.
// Pointers are followed until the value is not a pointer.
// New values are allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of
// interest to us (like encoding.TextUnmarshaler).
// Pointers are followed until the value is not a pointer. New values are
// allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of interest
// to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
if v.CanSet() {
pv := v.Addr()
if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok {
pvi := pv.Interface()
if _, ok := pvi.(encoding.TextUnmarshaler); ok {
return pv
}
if _, ok := pvi.(Unmarshaler); ok {
return pv
}
}
@@ -496,16 +591,12 @@ func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() {
return true
}
if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
rvi := rv.Interface()
if _, ok := rvi.(encoding.TextUnmarshaler); ok {
return true
}
if _, ok := rvi.(Unmarshaler); ok {
return true
}
return false
}

func e(format string, args ...interface{}) error {
return fmt.Errorf("toml: "+format, args...)
}

func badtype(expected string, data interface{}) error {
return e("cannot load TOML value of type %T into a Go %s", data, expected)
}

1 vendor/github.com/BurntSushi/toml/decode_go116.go generated vendored
@@ -1,3 +1,4 @@
//go:build go1.16
// +build go1.16

package toml

24 vendor/github.com/BurntSushi/toml/deprecated.go generated vendored
@@ -5,29 +5,17 @@ import (
"io"
)

// DEPRECATED!
//
// Use the identical encoding.TextMarshaler instead. It is defined here to
// support Go 1.1 and older.
// Deprecated: use encoding.TextMarshaler
type TextMarshaler encoding.TextMarshaler

// DEPRECATED!
//
// Use the identical encoding.TextUnmarshaler instead. It is defined here to
// support Go 1.1 and older.
// Deprecated: use encoding.TextUnmarshaler
type TextUnmarshaler encoding.TextUnmarshaler

// DEPRECATED!
//
// Use MetaData.PrimitiveDecode instead.
// Deprecated: use MetaData.PrimitiveDecode.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
md := MetaData{decoded: make(map[string]bool)}
md := MetaData{decoded: make(map[string]struct{})}
return md.unify(primValue.undecoded, rvalue(v))
}

// DEPRECATED!
//
// Use NewDecoder(reader).Decode(&v) instead.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
return NewDecoder(r).Decode(v)
}
// Deprecated: use NewDecoder(reader).Decode(&value).
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }

300 vendor/github.com/BurntSushi/toml/encode.go generated vendored
@@ -3,6 +3,7 @@ package toml
import (
"bufio"
"encoding"
"encoding/json"
"errors"
"fmt"
"io"
@@ -21,12 +22,11 @@ type tomlEncodeError struct{ error }
var (
errArrayNilElement = errors.New("toml: cannot encode array with nil element")
errNonString = errors.New("toml: cannot encode a map with non-string key type")
errAnonNonStruct = errors.New("toml: cannot encode an anonymous field that is not a struct")
errNoKey = errors.New("toml: top-level values must be Go maps or structs")
errAnything = errors.New("") // used in testing
)

var quotedReplacer = strings.NewReplacer(
var dblQuotedReplacer = strings.NewReplacer(
"\"", "\\\"",
"\\", "\\\\",
"\x00", `\u0000`,
@@ -64,35 +64,62 @@ var quotedReplacer = strings.NewReplacer(
"\x7f", `\u007f`,
)

var (
marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem()
marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
)

// Marshaler is the interface implemented by types that can marshal themselves
// into valid TOML.
type Marshaler interface {
MarshalTOML() ([]byte, error)
}

// Encoder encodes a Go value to a TOML document.
//
// The mapping between Go values and TOML values should be precisely the same as
// for the Decode* functions. Similarly, the TextMarshaler interface is
// supported by encoding the resulting bytes as strings. If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.
// for the Decode* functions.
//
// time.Time is encoded as a RFC 3339 string, and time.Duration as its string
// representation.
//
// The toml.Marshaler and encoder.TextMarshaler interfaces are supported for
// encoding the value as custom TOML.
//
// If you want to write arbitrary binary data then you will need to use
// something like base64 since TOML does not have any binary types.
//
// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
// are encoded first.
//
// Go maps will be sorted alphabetically by key for deterministic output.
//
// The toml struct tag can be used to provide the key name; if omitted the
// struct field name will be used. If the "omitempty" option is present the
// following value will be skipped:
//
// - arrays, slices, maps, and string with len of 0
// - struct with all zero values
// - bool false
//
// If omitzero is given all int and float types with a value of 0 will be
// skipped.
//
// Encoding Go values without a corresponding TOML representation will return an
// error. Examples of this include maps with non-string keys, slices with nil
// elements, embedded non-struct types, and nested slices containing maps or
// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
// is okay, as is []map[string][]string).
//
// NOTE: Only exported keys are encoded due to the use of reflection. Unexported
// NOTE: only exported keys are encoded due to the use of reflection. Unexported
// keys are silently discarded.
type Encoder struct {
// The string to use for a single indentation level. The default is two
// spaces.
// String to use for a single indentation level; default is two spaces.
Indent string

// hasWritten is whether we have written any output to w yet.
hasWritten bool
w *bufio.Writer
hasWritten bool // written any output to w yet?
}

// NewEncoder creates a new Encoder.
@@ -130,17 +157,15 @@ func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
|
||||
}
|
||||
|
||||
func (enc *Encoder) encode(key Key, rv reflect.Value) {
|
||||
// Special case. Time needs to be in ISO8601 format.
|
||||
// Special case. If we can marshal the type to text, then we used that.
|
||||
// Basically, this prevents the encoder for handling these types as
|
||||
// generic structs (or whatever the underlying type of a TextMarshaler is).
|
||||
switch t := rv.Interface().(type) {
|
||||
case time.Time, encoding.TextMarshaler:
|
||||
// If we can marshal the type to text, then we use that. This prevents the
|
||||
// encoder for handling these types as generic structs (or whatever the
|
||||
// underlying type of a TextMarshaler is).
|
||||
switch {
|
||||
case isMarshaler(rv):
|
||||
enc.writeKeyValue(key, rv, false)
|
||||
return
|
||||
// TODO: #76 would make this superfluous after implemented.
|
||||
case Primitive:
|
||||
enc.encode(key, reflect.ValueOf(t.undecoded))
|
||||
case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented.
|
||||
enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -200,17 +225,49 @@ func (enc *Encoder) eElement(rv reflect.Value) {
|
||||
enc.wf(v.In(time.UTC).Format(format))
|
||||
}
|
||||
return
|
||||
case encoding.TextMarshaler:
|
||||
// Use text marshaler if it's available for this value.
|
||||
if s, err := v.MarshalText(); err != nil {
|
||||
case Marshaler:
|
||||
s, err := v.MarshalTOML()
|
||||
if err != nil {
|
||||
encPanic(err)
|
||||
} else {
|
||||
enc.writeQuoted(string(s))
|
||||
}
|
||||
if s == nil {
|
||||
encPanic(errors.New("MarshalTOML returned nil and no error"))
|
||||
}
|
||||
enc.w.Write(s)
|
||||
return
|
||||
case encoding.TextMarshaler:
|
||||
s, err := v.MarshalText()
|
||||
if err != nil {
|
||||
encPanic(err)
|
||||
}
|
||||
if s == nil {
|
||||
encPanic(errors.New("MarshalText returned nil and no error"))
|
||||
}
|
||||
enc.writeQuoted(string(s))
|
||||
return
|
||||
case time.Duration:
|
||||
enc.writeQuoted(v.String())
|
||||
return
|
||||
case json.Number:
|
||||
n, _ := rv.Interface().(json.Number)
|
||||
|
||||
if n == "" { /// Useful zero value.
|
||||
enc.w.WriteByte('0')
|
||||
return
|
||||
} else if v, err := n.Int64(); err == nil {
|
||||
enc.eElement(reflect.ValueOf(v))
|
||||
return
|
||||
} else if v, err := n.Float64(); err == nil {
|
||||
enc.eElement(reflect.ValueOf(v))
|
||||
return
|
||||
}
|
||||
encPanic(errors.New(fmt.Sprintf("Unable to convert \"%s\" to neither int64 nor float64", n)))
|
||||
}
|
||||
|
||||
switch rv.Kind() {
|
||||
case reflect.Ptr:
|
||||
enc.eElement(rv.Elem())
|
||||
return
|
||||
case reflect.String:
|
||||
enc.writeQuoted(rv.String())
|
||||
case reflect.Bool:
|
||||
@@ -246,7 +303,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
|
||||
case reflect.Interface:
|
||||
enc.eElement(rv.Elem())
|
||||
default:
|
||||
encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface()))
|
||||
encPanic(fmt.Errorf("unexpected type: %T", rv.Interface()))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -260,14 +317,14 @@ func floatAddDecimal(fstr string) string {
|
||||
}
|
||||
|
||||
func (enc *Encoder) writeQuoted(s string) {
|
||||
enc.wf("\"%s\"", quotedReplacer.Replace(s))
|
||||
enc.wf("\"%s\"", dblQuotedReplacer.Replace(s))
|
||||
}
|
||||
|
||||
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
|
||||
length := rv.Len()
|
||||
enc.wf("[")
|
||||
for i := 0; i < length; i++ {
|
||||
elem := rv.Index(i)
|
||||
elem := eindirect(rv.Index(i))
|
||||
enc.eElement(elem)
|
||||
if i != length-1 {
|
||||
enc.wf(", ")
|
||||
@@ -281,12 +338,12 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
|
||||
encPanic(errNoKey)
|
||||
}
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
trv := rv.Index(i)
|
||||
trv := eindirect(rv.Index(i))
|
||||
if isNil(trv) {
|
||||
continue
|
||||
}
|
||||
enc.newline()
|
||||
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
|
||||
enc.wf("%s[[%s]]", enc.indentStr(key), key)
|
||||
enc.newline()
|
||||
enc.eMapOrStruct(key, trv, false)
|
||||
}
|
||||
@@ -299,14 +356,14 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) {
|
||||
enc.newline()
|
||||
}
|
||||
if len(key) > 0 {
|
||||
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
|
||||
enc.wf("%s[%s]", enc.indentStr(key), key)
|
||||
enc.newline()
|
||||
}
|
||||
enc.eMapOrStruct(key, rv, false)
|
||||
}
|
||||
|
||||
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) {
|
||||
switch rv := eindirect(rv); rv.Kind() {
|
||||
switch rv.Kind() {
|
||||
case reflect.Map:
|
||||
enc.eMap(key, rv, inline)
|
||||
case reflect.Struct:
|
||||
@@ -328,7 +385,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
|
||||
var mapKeysDirect, mapKeysSub []string
|
||||
for _, mapKey := range rv.MapKeys() {
|
||||
k := mapKey.String()
|
||||
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
|
||||
if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
|
||||
mapKeysSub = append(mapKeysSub, k)
|
||||
} else {
|
||||
mapKeysDirect = append(mapKeysDirect, k)
|
||||
@@ -338,7 +395,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
|
||||
var writeMapKeys = func(mapKeys []string, trailC bool) {
|
||||
sort.Strings(mapKeys)
|
||||
for i, mapKey := range mapKeys {
|
||||
val := rv.MapIndex(reflect.ValueOf(mapKey))
|
||||
val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
|
||||
if isNil(val) {
|
||||
continue
|
||||
}
|
||||
@@ -364,6 +421,15 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
|
||||
}
|
||||
}
|
||||
|
||||
const is32Bit = (32 << (^uint(0) >> 63)) == 32
|
||||
|
||||
func pointerTo(t reflect.Type) reflect.Type {
|
||||
if t.Kind() == reflect.Ptr {
|
||||
return pointerTo(t.Elem())
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
// Write keys for fields directly under this key first, because if we write
|
||||
// a field that creates a new table then all keys under it will be in that
|
||||
@@ -380,38 +446,42 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
|
||||
for i := 0; i < rt.NumField(); i++ {
|
||||
f := rt.Field(i)
|
||||
if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields.
|
||||
isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct
|
||||
if f.PkgPath != "" && !isEmbed { /// Skip unexported fields.
|
||||
continue
|
||||
}
|
||||
opts := getOptions(f.Tag)
|
||||
if opts.skip {
|
||||
continue
|
||||
}
|
||||
|
||||
frv := rv.Field(i)
|
||||
frv := eindirect(rv.Field(i))
|
||||
|
||||
// Treat anonymous struct fields with tag names as though they are
|
||||
// not anonymous, like encoding/json does.
|
||||
//
|
||||
// Non-struct anonymous fields use the normal encoding logic.
|
||||
if f.Anonymous {
|
||||
t := f.Type
|
||||
switch t.Kind() {
|
||||
case reflect.Struct:
|
||||
if getOptions(f.Tag).name == "" {
|
||||
addFields(t, frv, append(start, f.Index...))
|
||||
continue
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" {
|
||||
if !frv.IsNil() {
|
||||
addFields(t.Elem(), frv.Elem(), append(start, f.Index...))
|
||||
}
|
||||
continue
|
||||
}
|
||||
if isEmbed {
|
||||
if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct {
|
||||
addFields(frv.Type(), frv, append(start, f.Index...))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if typeIsHash(tomlTypeOfGo(frv)) {
|
||||
if typeIsTable(tomlTypeOfGo(frv)) {
|
||||
fieldsSub = append(fieldsSub, append(start, f.Index...))
|
||||
} else {
|
||||
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
|
||||
// Copy so it works correct on 32bit archs; not clear why this
|
||||
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
|
||||
// This also works fine on 64bit, but 32bit archs are somewhat
|
||||
// rare and this is a wee bit faster.
|
||||
if is32Bit {
|
||||
copyStart := make([]int, len(start))
|
||||
copy(copyStart, start)
|
||||
fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...))
|
||||
} else {
|
||||
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -420,7 +490,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
writeFields := func(fields [][]int) {
|
||||
for _, fieldIndex := range fields {
|
||||
fieldType := rt.FieldByIndex(fieldIndex)
|
||||
fieldVal := rv.FieldByIndex(fieldIndex)
|
||||
fieldVal := eindirect(rv.FieldByIndex(fieldIndex))
|
||||
|
||||
if isNil(fieldVal) { /// Don't write anything for nil fields.
|
||||
continue
|
||||
@@ -462,17 +532,32 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
}
|
||||
}
|
||||
|
||||
// tomlTypeName returns the TOML type name of the Go value's type. It is
|
||||
// used to determine whether the types of array elements are mixed (which is
|
||||
// forbidden). If the Go value is nil, then it is illegal for it to be an array
|
||||
// element, and valueIsNil is returned as true.
|
||||
|
||||
// Returns the TOML type of a Go value. The type may be `nil`, which means
|
||||
// no concrete TOML type could be found.
|
||||
// tomlTypeOfGo returns the TOML type name of the Go value's type.
|
||||
//
|
||||
// It is used to determine whether the types of array elements are mixed (which
|
||||
// is forbidden). If the Go value is nil, then it is illegal for it to be an
|
||||
// array element, and valueIsNil is returned as true.
|
||||
//
|
||||
// The type may be `nil`, which means no concrete TOML type could be found.
|
||||
func tomlTypeOfGo(rv reflect.Value) tomlType {
|
||||
if isNil(rv) || !rv.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if rv.Kind() == reflect.Struct {
|
||||
if rv.Type() == timeType {
|
||||
return tomlDatetime
|
||||
}
|
||||
if isMarshaler(rv) {
|
||||
return tomlString
|
||||
}
|
||||
return tomlHash
|
||||
}
|
||||
|
||||
if isMarshaler(rv) {
|
||||
return tomlString
|
||||
}
|
||||
|
||||
switch rv.Kind() {
|
||||
case reflect.Bool:
|
||||
return tomlBool
|
||||
@@ -484,7 +569,7 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return tomlFloat
|
||||
case reflect.Array, reflect.Slice:
|
||||
if typeEqual(tomlHash, tomlArrayType(rv)) {
|
||||
if isTableArray(rv) {
|
||||
return tomlArrayHash
|
||||
}
|
||||
return tomlArray
|
||||
@@ -494,56 +579,35 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
|
||||
return tomlString
|
||||
case reflect.Map:
|
||||
return tomlHash
|
||||
case reflect.Struct:
|
||||
switch rv.Interface().(type) {
|
||||
case time.Time:
|
||||
return tomlDatetime
|
||||
case encoding.TextMarshaler:
|
||||
return tomlString
|
||||
default:
|
||||
// Someone used a pointer receiver: we can make it work for pointer
|
||||
// values.
|
||||
if rv.CanAddr() {
|
||||
_, ok := rv.Addr().Interface().(encoding.TextMarshaler)
|
||||
if ok {
|
||||
return tomlString
|
||||
}
|
||||
}
|
||||
return tomlHash
|
||||
}
|
||||
default:
|
||||
_, ok := rv.Interface().(encoding.TextMarshaler)
|
||||
if ok {
|
||||
return tomlString
|
||||
}
|
||||
encPanic(errors.New("unsupported type: " + rv.Kind().String()))
|
||||
panic("") // Need *some* return value
|
||||
panic("unreachable")
|
||||
}
|
||||
}
|
||||
|
||||
// tomlArrayType returns the element type of a TOML array. The type returned
|
||||
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
|
||||
// slize). This function may also panic if it finds a type that cannot be
|
||||
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
|
||||
// nested arrays of tables).
|
||||
func tomlArrayType(rv reflect.Value) tomlType {
|
||||
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
|
||||
return nil
|
||||
func isMarshaler(rv reflect.Value) bool {
|
||||
return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml)
|
||||
}
|
||||
|
||||
// isTableArray reports if all entries in the array or slice are a table.
|
||||
func isTableArray(arr reflect.Value) bool {
|
||||
if isNil(arr) || !arr.IsValid() || arr.Len() == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
/// Don't allow nil.
|
||||
rvlen := rv.Len()
|
||||
for i := 1; i < rvlen; i++ {
|
||||
if tomlTypeOfGo(rv.Index(i)) == nil {
|
||||
ret := true
|
||||
for i := 0; i < arr.Len(); i++ {
|
||||
tt := tomlTypeOfGo(eindirect(arr.Index(i)))
|
||||
// Don't allow nil.
|
||||
if tt == nil {
|
||||
encPanic(errArrayNilElement)
|
||||
}
|
||||
}
|
||||
|
||||
firstType := tomlTypeOfGo(rv.Index(0))
|
||||
if firstType == nil {
|
||||
encPanic(errArrayNilElement)
|
||||
if ret && !typeEqual(tomlHash, tt) {
|
||||
ret = false
|
||||
}
|
||||
}
|
||||
return firstType
|
||||
return ret
|
||||
}
|
||||
|
||||
type tagOptions struct {
|
||||
@@ -588,6 +652,8 @@ func isEmpty(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
|
||||
return rv.Len() == 0
|
||||
case reflect.Struct:
|
||||
return reflect.Zero(rv.Type()).Interface() == rv.Interface()
|
||||
case reflect.Bool:
|
||||
return !rv.Bool()
|
||||
}
|
||||
@@ -604,7 +670,14 @@ func (enc *Encoder) newline() {
|
||||
//
|
||||
// key = <any value>
|
||||
//
|
||||
// If inline is true it won't add a newline at the end.
|
||||
// This is also used for "k = v" in inline tables; so something like this will
|
||||
// be written in three calls:
|
||||
//
|
||||
// ┌────────────────────┐
|
||||
// │ ┌───┐ ┌─────┐│
|
||||
// v v v v vv
|
||||
// key = {k = v, k2 = v2}
|
||||
//
|
||||
func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
|
||||
if len(key) == 0 {
|
||||
encPanic(errNoKey)
|
||||
@@ -617,7 +690,8 @@ func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
|
||||
}
|
||||
|
||||
func (enc *Encoder) wf(format string, v ...interface{}) {
|
||||
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
|
||||
_, err := fmt.Fprintf(enc.w, format, v...)
|
||||
if err != nil {
|
||||
encPanic(err)
|
||||
}
|
||||
enc.hasWritten = true
|
||||
@@ -631,13 +705,25 @@ func encPanic(err error) {
|
||||
panic(tomlEncodeError{err})
|
||||
}
|
||||
|
||||
// Resolve any level of pointers to the actual value (e.g. **string → string).
|
||||
func eindirect(v reflect.Value) reflect.Value {
|
||||
switch v.Kind() {
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
return eindirect(v.Elem())
|
||||
default:
|
||||
if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
|
||||
if isMarshaler(v) {
|
||||
return v
|
||||
}
|
||||
if v.CanAddr() { /// Special case for marshalers; see #358.
|
||||
if pv := v.Addr(); isMarshaler(pv) {
|
||||
return pv
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
if v.IsNil() {
|
||||
return v
|
||||
}
|
||||
|
||||
return eindirect(v.Elem())
|
||||
}
|
||||
|
||||
func isNil(rv reflect.Value) bool {
|
||||
|
||||
276
vendor/github.com/BurntSushi/toml/error.go
generated
vendored
Normal file
276
vendor/github.com/BurntSushi/toml/error.go
generated
vendored
Normal file
@@ -0,0 +1,276 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ParseError is returned when there is an error parsing the TOML syntax.
|
||||
//
|
||||
// For example invalid syntax, duplicate keys, etc.
|
||||
//
|
||||
// In addition to the error message itself, you can also print detailed location
|
||||
// information with context by using ErrorWithPosition():
|
||||
//
|
||||
// toml: error: Key 'fruit' was already created and cannot be used as an array.
|
||||
//
|
||||
// At line 4, column 2-7:
|
||||
//
|
||||
// 2 | fruit = []
|
||||
// 3 |
|
||||
// 4 | [[fruit]] # Not allowed
|
||||
// ^^^^^
|
||||
//
|
||||
// Furthermore, the ErrorWithUsage() can be used to print the above with some
|
||||
// more detailed usage guidance:
|
||||
//
|
||||
// toml: error: newlines not allowed within inline tables
|
||||
//
|
||||
// At line 1, column 18:
|
||||
//
|
||||
// 1 | x = [{ key = 42 #
|
||||
// ^
|
||||
//
|
||||
// Error help:
|
||||
//
|
||||
// Inline tables must always be on a single line:
|
||||
//
|
||||
// table = {key = 42, second = 43}
|
||||
//
|
||||
// It is invalid to split them over multiple lines like so:
|
||||
//
|
||||
// # INVALID
|
||||
// table = {
|
||||
// key = 42,
|
||||
// second = 43
|
||||
// }
|
||||
//
|
||||
// Use regular for this:
|
||||
//
|
||||
// [table]
|
||||
// key = 42
|
||||
// second = 43
|
||||
type ParseError struct {
|
||||
Message string // Short technical message.
|
||||
Usage string // Longer message with usage guidance; may be blank.
|
||||
Position Position // Position of the error
|
||||
LastKey string // Last parsed key, may be blank.
|
||||
Line int // Line the error occurred. Deprecated: use Position.
|
||||
|
||||
err error
|
||||
input string
|
||||
}
|
||||
|
||||
// Position of an error.
|
||||
type Position struct {
|
||||
Line int // Line number, starting at 1.
|
||||
Start int // Start of error, as byte offset starting at 0.
|
||||
Len int // Lenght in bytes.
|
||||
}
|
||||
|
||||
func (pe ParseError) Error() string {
|
||||
msg := pe.Message
|
||||
if msg == "" { // Error from errorf()
|
||||
msg = pe.err.Error()
|
||||
}
|
||||
|
||||
if pe.LastKey == "" {
|
||||
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
|
||||
}
|
||||
return fmt.Sprintf("toml: line %d (last key %q): %s",
|
||||
pe.Position.Line, pe.LastKey, msg)
|
||||
}
|
||||
|
||||
// ErrorWithUsage() returns the error with detailed location context.
|
||||
//
|
||||
// See the documentation on ParseError.
|
||||
func (pe ParseError) ErrorWithPosition() string {
|
||||
if pe.input == "" { // Should never happen, but just in case.
|
||||
return pe.Error()
|
||||
}
|
||||
|
||||
var (
|
||||
lines = strings.Split(pe.input, "\n")
|
||||
col = pe.column(lines)
|
||||
b = new(strings.Builder)
|
||||
)
|
||||
|
||||
msg := pe.Message
|
||||
if msg == "" {
|
||||
msg = pe.err.Error()
|
||||
}
|
||||
|
||||
// TODO: don't show control characters as literals? This may not show up
|
||||
// well everywhere.
|
||||
|
||||
if pe.Position.Len == 1 {
|
||||
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
|
||||
msg, pe.Position.Line, col+1)
|
||||
} else {
|
||||
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
|
||||
msg, pe.Position.Line, col, col+pe.Position.Len)
|
||||
}
|
||||
if pe.Position.Line > 2 {
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3])
|
||||
}
|
||||
if pe.Position.Line > 1 {
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2])
|
||||
}
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1])
|
||||
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// ErrorWithUsage() returns the error with detailed location context and usage
|
||||
// guidance.
|
||||
//
|
||||
// See the documentation on ParseError.
|
||||
func (pe ParseError) ErrorWithUsage() string {
|
||||
m := pe.ErrorWithPosition()
|
||||
if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {
|
||||
lines := strings.Split(strings.TrimSpace(u.Usage()), "\n")
|
||||
for i := range lines {
|
||||
if lines[i] != "" {
|
||||
lines[i] = " " + lines[i]
|
||||
}
|
||||
}
|
||||
return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n"
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (pe ParseError) column(lines []string) int {
|
||||
var pos, col int
|
||||
for i := range lines {
|
||||
ll := len(lines[i]) + 1 // +1 for the removed newline
|
||||
if pos+ll >= pe.Position.Start {
|
||||
col = pe.Position.Start - pos
|
||||
if col < 0 { // Should never happen, but just in case.
|
||||
col = 0
|
||||
}
|
||||
break
|
||||
}
|
||||
pos += ll
|
||||
}
|
||||
|
||||
return col
|
||||
}
|
||||
|
||||
type (
|
||||
errLexControl struct{ r rune }
|
||||
errLexEscape struct{ r rune }
|
||||
errLexUTF8 struct{ b byte }
|
||||
errLexInvalidNum struct{ v string }
|
||||
errLexInvalidDate struct{ v string }
|
||||
errLexInlineTableNL struct{}
|
||||
errLexStringNL struct{}
|
||||
errParseRange struct {
|
||||
i interface{} // int or float
|
||||
size string // "int64", "uint16", etc.
|
||||
}
|
||||
errParseDuration struct{ d string }
|
||||
)
|
||||
|
||||
func (e errLexControl) Error() string {
|
||||
return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r)
|
||||
}
|
||||
func (e errLexControl) Usage() string { return "" }
|
||||
|
||||
func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) }
|
||||
func (e errLexEscape) Usage() string { return usageEscape }
|
||||
func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
|
||||
func (e errLexUTF8) Usage() string { return "" }
|
||||
func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) }
|
||||
func (e errLexInvalidNum) Usage() string { return "" }
|
||||
func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) }
|
||||
func (e errLexInvalidDate) Usage() string { return "" }
|
||||
func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
|
||||
func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
|
||||
func (e errLexStringNL) Error() string { return "strings cannot contain newlines" }
|
||||
func (e errLexStringNL) Usage() string { return usageStringNewline }
|
||||
func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) }
|
||||
func (e errParseRange) Usage() string { return usageIntOverflow }
|
||||
func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
|
||||
func (e errParseDuration) Usage() string { return usageDuration }
|
||||
|
||||
const usageEscape = `
|
||||
A '\' inside a "-delimited string is interpreted as an escape character.
|
||||
|
||||
The following escape sequences are supported:
|
||||
\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX
|
||||
|
||||
To prevent a '\' from being recognized as an escape character, use either:
|
||||
|
||||
- a ' or '''-delimited string; escape characters aren't processed in them; or
|
||||
- write two backslashes to get a single backslash: '\\'.
|
||||
|
||||
If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/'
|
||||
instead of '\' will usually also work: "C:/Users/martin".
|
||||
`
|
||||
|
||||
const usageInlineNewline = `
|
||||
Inline tables must always be on a single line:
|
||||
|
||||
table = {key = 42, second = 43}
|
||||
|
||||
It is invalid to split them over multiple lines like so:
|
||||
|
||||
# INVALID
|
||||
table = {
|
||||
key = 42,
|
||||
second = 43
|
||||
}
|
||||
|
||||
Use regular for this:
|
||||
|
||||
[table]
|
||||
key = 42
|
||||
second = 43
|
||||
`
|
||||
|
||||
const usageStringNewline = `
|
||||
Strings must always be on a single line, and cannot span more than one line:
|
||||
|
||||
# INVALID
|
||||
string = "Hello,
|
||||
world!"
|
||||
|
||||
Instead use """ or ''' to split strings over multiple lines:
|
||||
|
||||
string = """Hello,
|
||||
world!"""
|
||||
`
|
||||
|
||||
const usageIntOverflow = `
|
||||
This number is too large; this may be an error in the TOML, but it can also be a
|
||||
bug in the program that uses too small of an integer.
|
||||
|
||||
The maximum and minimum values are:
|
||||
|
||||
size │ lowest │ highest
|
||||
───────┼────────────────┼──────────
|
||||
int8 │ -128 │ 127
|
||||
int16 │ -32,768 │ 32,767
|
||||
int32 │ -2,147,483,648 │ 2,147,483,647
|
||||
int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷
|
||||
uint8 │ 0 │ 255
|
||||
uint16 │ 0 │ 65535
|
||||
uint32 │ 0 │ 4294967295
|
||||
uint64 │ 0 │ 1.8 × 10¹⁸
|
||||
|
||||
int refers to int32 on 32-bit systems and int64 on 64-bit systems.
|
||||
`
|
||||
|
||||
const usageDuration = `
|
||||
A duration must be as "number<unit>", without any spaces. Valid units are:
|
||||
|
||||
ns nanoseconds (billionth of a second)
|
||||
us, µs microseconds (millionth of a second)
|
||||
ms milliseconds (thousands of a second)
|
||||
s seconds
|
||||
m minutes
|
||||
h hours
|
||||
|
||||
You can combine multiple units; for example "5m10s" for 5 minutes and 10
|
||||
seconds.
|
||||
`
|
||||
3
vendor/github.com/BurntSushi/toml/go.mod
generated
vendored
3
vendor/github.com/BurntSushi/toml/go.mod
generated
vendored
@@ -1,3 +0,0 @@
|
||||
module github.com/BurntSushi/toml
|
||||
|
||||
go 1.16
|
||||
0
vendor/github.com/BurntSushi/toml/go.sum
generated
vendored
0
vendor/github.com/BurntSushi/toml/go.sum
generated
vendored
374
vendor/github.com/BurntSushi/toml/lex.go
generated
vendored
374
vendor/github.com/BurntSushi/toml/lex.go
generated
vendored
@@ -37,28 +37,14 @@ const (
|
||||
itemInlineTableEnd
|
||||
)
|
||||
|
||||
const (
|
||||
eof = 0
|
||||
comma = ','
|
||||
tableStart = '['
|
||||
tableEnd = ']'
|
||||
arrayTableStart = '['
|
||||
arrayTableEnd = ']'
|
||||
tableSep = '.'
|
||||
keySep = '='
|
||||
arrayStart = '['
|
||||
arrayEnd = ']'
|
||||
commentStart = '#'
|
||||
stringStart = '"'
|
||||
stringEnd = '"'
|
||||
rawStringStart = '\''
|
||||
rawStringEnd = '\''
|
||||
inlineTableStart = '{'
|
||||
inlineTableEnd = '}'
|
||||
)
|
||||
const eof = 0
|
||||
|
||||
type stateFn func(lx *lexer) stateFn
|
||||
|
||||
func (p Position) String() string {
|
||||
return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len)
|
||||
}
|
||||
|
||||
type lexer struct {
|
||||
input string
|
||||
start int
|
||||
@@ -67,26 +53,26 @@ type lexer struct {
|
||||
state stateFn
|
||||
items chan item
|
||||
|
||||
// Allow for backing up up to four runes.
|
||||
// This is necessary because TOML contains 3-rune tokens (""" and ''').
|
||||
// Allow for backing up up to 4 runes. This is necessary because TOML
|
||||
// contains 3-rune tokens (""" and ''').
|
||||
prevWidths [4]int
|
||||
nprev int // how many of prevWidths are in use
|
||||
// If we emit an eof, we can still back up, but it is not OK to call
|
||||
// next again.
|
||||
atEOF bool
|
||||
nprev int // how many of prevWidths are in use
|
||||
atEOF bool // If we emit an eof, we can still back up, but it is not OK to call next again.
|
||||
|
||||
// A stack of state functions used to maintain context.
|
||||
// The idea is to reuse parts of the state machine in various places.
|
||||
// For example, values can appear at the top level or within arbitrarily
|
||||
// nested arrays. The last state on the stack is used after a value has
|
||||
// been lexed. Similarly for comments.
|
||||
//
|
||||
// The idea is to reuse parts of the state machine in various places. For
|
||||
// example, values can appear at the top level or within arbitrarily nested
|
||||
// arrays. The last state on the stack is used after a value has been lexed.
|
||||
// Similarly for comments.
|
||||
stack []stateFn
|
||||
}
|
||||
|
||||
type item struct {
|
||||
typ itemType
|
||||
val string
|
||||
line int
|
||||
typ itemType
|
||||
val string
|
||||
err error
|
||||
pos Position
|
||||
}
|
||||
|
||||
func (lx *lexer) nextItem() item {
|
||||
@@ -96,7 +82,7 @@ func (lx *lexer) nextItem() item {
|
||||
return item
|
||||
default:
|
||||
lx.state = lx.state(lx)
|
||||
//fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack)
|
||||
//fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -105,9 +91,9 @@ func lex(input string) *lexer {
|
||||
lx := &lexer{
|
||||
input: input,
|
||||
state: lexTop,
|
||||
line: 1,
|
||||
items: make(chan item, 10),
|
||||
stack: make([]stateFn, 0, 10),
|
||||
line: 1,
|
||||
}
|
||||
return lx
|
||||
}
|
||||
@@ -129,13 +115,30 @@ func (lx *lexer) current() string {
|
||||
return lx.input[lx.start:lx.pos]
|
||||
}
|
||||
|
||||
func (lx lexer) getPos() Position {
|
||||
p := Position{
|
||||
Line: lx.line,
|
||||
Start: lx.start,
|
||||
Len: lx.pos - lx.start,
|
||||
}
|
||||
if p.Len <= 0 {
|
||||
p.Len = 1
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (lx *lexer) emit(typ itemType) {
|
||||
lx.items <- item{typ, lx.current(), lx.line}
|
||||
// Needed for multiline strings ending with an incomplete UTF-8 sequence.
|
||||
if lx.start > lx.pos {
|
||||
lx.error(errLexUTF8{lx.input[lx.pos]})
|
||||
return
|
||||
}
|
||||
lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()}
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
func (lx *lexer) emitTrim(typ itemType) {
|
||||
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
|
||||
lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())}
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
@@ -160,7 +163,13 @@ func (lx *lexer) next() (r rune) {
|
||||
|
||||
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
|
||||
if r == utf8.RuneError {
|
||||
lx.errorf("invalid UTF-8 byte at position %d (line %d): 0x%02x", lx.pos, lx.line, lx.input[lx.pos])
|
||||
lx.error(errLexUTF8{lx.input[lx.pos]})
|
||||
return utf8.RuneError
|
||||
}
|
||||
|
||||
// Note: don't use peek() here, as this calls next().
|
||||
if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) {
|
||||
lx.errorControlChar(r)
|
||||
return utf8.RuneError
|
||||
}
|
||||
|
||||
@@ -188,6 +197,7 @@ func (lx *lexer) backup() {
|
||||
lx.prevWidths[1] = lx.prevWidths[2]
|
||||
lx.prevWidths[2] = lx.prevWidths[3]
|
||||
lx.nprev--
|
||||
|
||||
lx.pos -= w
|
||||
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
|
||||
lx.line--
|
||||
@@ -223,18 +233,58 @@ func (lx *lexer) skip(pred func(rune) bool) {
|
||||
}
|
||||
}
|
||||
|
||||
// errorf stops all lexing by emitting an error and returning `nil`.
|
||||
// error stops all lexing by emitting an error and returning `nil`.
|
||||
//
|
||||
// Note that any value that is a character is escaped if it's a special
|
||||
// character (newlines, tabs, etc.).
|
||||
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
|
||||
lx.items <- item{
|
||||
itemError,
|
||||
fmt.Sprintf(format, values...),
|
||||
lx.line,
|
||||
func (lx *lexer) error(err error) stateFn {
|
||||
if lx.atEOF {
|
||||
return lx.errorPrevLine(err)
|
||||
}
|
||||
lx.items <- item{typ: itemError, pos: lx.getPos(), err: err}
|
||||
return nil
|
||||
}
|
||||
|
||||
// errorfPrevline is like error(), but sets the position to the last column of
|
||||
// the previous line.
|
||||
//
|
||||
// This is so that unexpected EOF or NL errors don't show on a new blank line.
|
||||
func (lx *lexer) errorPrevLine(err error) stateFn {
|
||||
pos := lx.getPos()
|
||||
pos.Line--
|
||||
pos.Len = 1
|
||||
pos.Start = lx.pos - 1
|
||||
lx.items <- item{typ: itemError, pos: pos, err: err}
|
||||
return nil
|
||||
}
|
||||
|
||||
// errorPos is like error(), but allows explicitly setting the position.
|
||||
func (lx *lexer) errorPos(start, length int, err error) stateFn {
|
||||
pos := lx.getPos()
|
||||
pos.Start = start
|
||||
pos.Len = length
|
||||
lx.items <- item{typ: itemError, pos: pos, err: err}
|
||||
return nil
|
||||
}
|
||||
|
||||
// errorf is like error, and creates a new error.
|
||||
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
|
||||
if lx.atEOF {
|
||||
pos := lx.getPos()
|
||||
pos.Line--
|
||||
pos.Len = 1
|
||||
pos.Start = lx.pos - 1
|
||||
lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
|
||||
return nil
|
||||
}
|
||||
lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lx *lexer) errorControlChar(cc rune) stateFn {
|
||||
return lx.errorPos(lx.pos-1, 1, errLexControl{cc})
|
||||
}
|
||||
|
||||
// lexTop consumes elements at the top level of TOML data.
|
||||
func lexTop(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
@@ -242,10 +292,10 @@ func lexTop(lx *lexer) stateFn {
|
||||
return lexSkip(lx, lexTop)
|
||||
}
|
||||
switch r {
|
||||
case commentStart:
|
||||
case '#':
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
case tableStart:
|
||||
case '[':
|
||||
return lexTableStart
|
||||
case eof:
|
||||
if lx.pos > lx.start {
|
||||
@@ -268,7 +318,7 @@ func lexTop(lx *lexer) stateFn {
|
||||
func lexTopEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == commentStart:
|
||||
case r == '#':
|
||||
// a comment will read to a newline for us.
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
@@ -292,7 +342,7 @@ func lexTopEnd(lx *lexer) stateFn {
|
||||
// It also handles the case that this is an item in an array of tables.
|
||||
// e.g., '[[name]]'.
|
||||
func lexTableStart(lx *lexer) stateFn {
|
||||
if lx.peek() == arrayTableStart {
|
||||
if lx.peek() == '[' {
|
||||
lx.next()
|
||||
lx.emit(itemArrayTableStart)
|
||||
lx.push(lexArrayTableEnd)
|
||||
@@ -309,10 +359,8 @@ func lexTableEnd(lx *lexer) stateFn {
|
||||
}
|
||||
|
||||
func lexArrayTableEnd(lx *lexer) stateFn {
|
||||
if r := lx.next(); r != arrayTableEnd {
|
||||
return lx.errorf(
|
||||
"expected end of table array name delimiter %q, but got %q instead",
|
||||
arrayTableEnd, r)
|
||||
if r := lx.next(); r != ']' {
|
||||
return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r)
|
||||
}
|
||||
lx.emit(itemArrayTableEnd)
|
||||
return lexTopEnd
|
||||
@@ -321,11 +369,11 @@ func lexArrayTableEnd(lx *lexer) stateFn {
|
||||
func lexTableNameStart(lx *lexer) stateFn {
|
||||
lx.skip(isWhitespace)
|
||||
switch r := lx.peek(); {
|
||||
case r == tableEnd || r == eof:
|
||||
case r == ']' || r == eof:
|
||||
return lx.errorf("unexpected end of table name (table names cannot be empty)")
|
||||
case r == tableSep:
|
||||
case r == '.':
|
||||
return lx.errorf("unexpected table separator (table names cannot be empty)")
|
||||
case r == stringStart || r == rawStringStart:
|
||||
case r == '"' || r == '\'':
|
||||
lx.ignore()
|
||||
lx.push(lexTableNameEnd)
|
||||
return lexQuotedName
|
||||
@@ -342,10 +390,10 @@ func lexTableNameEnd(lx *lexer) stateFn {
|
||||
switch r := lx.next(); {
|
||||
case isWhitespace(r):
|
||||
return lexTableNameEnd
|
||||
case r == tableSep:
|
||||
case r == '.':
|
||||
lx.ignore()
|
||||
return lexTableNameStart
|
||||
case r == tableEnd:
|
||||
case r == ']':
|
||||
return lx.pop()
|
||||
default:
|
||||
return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r)
|
||||
@@ -379,10 +427,10 @@ func lexQuotedName(lx *lexer) stateFn {
|
||||
switch {
|
||||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexValue)
|
||||
case r == stringStart:
|
||||
case r == '"':
|
||||
lx.ignore() // ignore the '"'
|
||||
return lexString
|
||||
case r == rawStringStart:
|
||||
case r == '\'':
|
||||
lx.ignore() // ignore the "'"
|
||||
return lexRawString
|
||||
case r == eof:
|
||||
@@ -400,7 +448,7 @@ func lexKeyStart(lx *lexer) stateFn {
|
||||
return lx.errorf("unexpected '=': key name appears blank")
|
||||
case r == '.':
|
||||
return lx.errorf("unexpected '.': keys cannot start with a '.'")
|
||||
case r == stringStart || r == rawStringStart:
|
||||
case r == '"' || r == '\'':
|
||||
lx.ignore()
|
||||
fallthrough
|
||||
default: // Bare key
|
||||
@@ -416,7 +464,7 @@ func lexKeyNameStart(lx *lexer) stateFn {
|
||||
return lx.errorf("unexpected '='")
|
||||
case r == '.':
|
||||
return lx.errorf("unexpected '.'")
|
||||
case r == stringStart || r == rawStringStart:
|
||||
case r == '"' || r == '\'':
|
||||
lx.ignore()
|
||||
lx.push(lexKeyEnd)
|
||||
return lexQuotedName
|
||||
@@ -434,7 +482,7 @@ func lexKeyEnd(lx *lexer) stateFn {
|
||||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexKeyEnd)
|
||||
case r == eof:
|
||||
return lx.errorf("unexpected EOF; expected key separator %q", keySep)
|
||||
return lx.errorf("unexpected EOF; expected key separator '='")
|
||||
case r == '.':
|
||||
lx.ignore()
|
||||
return lexKeyNameStart
|
||||
@@ -461,17 +509,17 @@ func lexValue(lx *lexer) stateFn {
|
||||
return lexNumberOrDateStart
|
||||
}
|
||||
switch r {
|
||||
case arrayStart:
|
||||
case '[':
|
||||
lx.ignore()
|
||||
lx.emit(itemArray)
|
||||
return lexArrayValue
|
||||
case inlineTableStart:
|
||||
case '{':
|
||||
lx.ignore()
|
||||
lx.emit(itemInlineTableStart)
|
||||
return lexInlineTableValue
|
||||
case stringStart:
|
||||
if lx.accept(stringStart) {
|
||||
if lx.accept(stringStart) {
|
||||
case '"':
|
||||
if lx.accept('"') {
|
||||
if lx.accept('"') {
|
||||
lx.ignore() // Ignore """
|
||||
return lexMultilineString
|
||||
}
|
||||
@@ -479,9 +527,9 @@ func lexValue(lx *lexer) stateFn {
|
||||
}
|
||||
lx.ignore() // ignore the '"'
|
||||
return lexString
|
||||
case rawStringStart:
|
||||
if lx.accept(rawStringStart) {
|
||||
if lx.accept(rawStringStart) {
|
||||
case '\'':
|
||||
if lx.accept('\'') {
|
||||
if lx.accept('\'') {
|
||||
lx.ignore() // Ignore """
|
||||
return lexMultilineRawString
|
||||
}
|
||||
@@ -520,14 +568,12 @@ func lexArrayValue(lx *lexer) stateFn {
|
||||
switch {
|
||||
case isWhitespace(r) || isNL(r):
|
||||
return lexSkip(lx, lexArrayValue)
|
||||
case r == commentStart:
|
||||
case r == '#':
|
||||
lx.push(lexArrayValue)
|
||||
return lexCommentStart
|
||||
case r == comma:
|
||||
case r == ',':
|
||||
return lx.errorf("unexpected comma")
|
||||
case r == arrayEnd:
|
||||
// NOTE(caleb): The spec isn't clear about whether you can have
|
||||
// a trailing comma or not, so we'll allow it.
|
||||
case r == ']':
|
||||
return lexArrayEnd
|
||||
}
|
||||
|
||||
@@ -540,22 +586,20 @@ func lexArrayValue(lx *lexer) stateFn {
|
||||
// the next value (or the end of the array): it ignores whitespace and newlines
|
||||
// and expects either a ',' or a ']'.
|
||||
func lexArrayValueEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
switch r := lx.next(); {
|
||||
case isWhitespace(r) || isNL(r):
|
||||
return lexSkip(lx, lexArrayValueEnd)
|
||||
case r == commentStart:
|
||||
case r == '#':
|
||||
lx.push(lexArrayValueEnd)
|
||||
return lexCommentStart
|
||||
case r == comma:
|
||||
case r == ',':
|
||||
lx.ignore()
|
||||
return lexArrayValue // move on to the next value
|
||||
case r == arrayEnd:
|
||||
case r == ']':
|
||||
return lexArrayEnd
|
||||
default:
|
||||
return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r))
|
||||
}
|
||||
return lx.errorf(
|
||||
"expected a comma or array terminator %q, but got %s instead",
|
||||
arrayEnd, runeOrEOF(r))
|
||||
}
|
||||
|
||||
// lexArrayEnd finishes the lexing of an array.
|
||||
@@ -574,13 +618,13 @@ func lexInlineTableValue(lx *lexer) stateFn {
|
||||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexInlineTableValue)
|
||||
case isNL(r):
|
||||
return lx.errorf("newlines not allowed within inline tables")
|
||||
case r == commentStart:
|
||||
return lx.errorPrevLine(errLexInlineTableNL{})
|
||||
case r == '#':
|
||||
lx.push(lexInlineTableValue)
|
||||
return lexCommentStart
|
||||
case r == comma:
|
||||
case r == ',':
|
||||
return lx.errorf("unexpected comma")
|
||||
case r == inlineTableEnd:
|
||||
case r == '}':
|
||||
return lexInlineTableEnd
|
||||
}
|
||||
lx.backup()
|
||||
@@ -596,23 +640,21 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
|
||||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexInlineTableValueEnd)
|
||||
case isNL(r):
|
||||
return lx.errorf("newlines not allowed within inline tables")
|
||||
case r == commentStart:
|
||||
return lx.errorPrevLine(errLexInlineTableNL{})
|
||||
case r == '#':
|
||||
lx.push(lexInlineTableValueEnd)
|
||||
return lexCommentStart
|
||||
case r == comma:
|
||||
case r == ',':
|
||||
lx.ignore()
|
||||
lx.skip(isWhitespace)
|
||||
if lx.peek() == '}' {
|
||||
return lx.errorf("trailing comma not allowed in inline tables")
|
||||
}
|
||||
return lexInlineTableValue
|
||||
case r == inlineTableEnd:
|
||||
case r == '}':
|
||||
return lexInlineTableEnd
|
||||
default:
|
||||
return lx.errorf(
|
||||
"expected a comma or an inline table terminator %q, but got %s instead",
|
||||
inlineTableEnd, runeOrEOF(r))
|
||||
return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -638,14 +680,12 @@ func lexString(lx *lexer) stateFn {
|
||||
switch {
|
||||
case r == eof:
|
||||
return lx.errorf(`unexpected EOF; expected '"'`)
|
||||
case isControl(r) || r == '\r':
|
||||
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
|
||||
case isNL(r):
|
||||
return lx.errorf("strings cannot contain newlines")
|
||||
return lx.errorPrevLine(errLexStringNL{})
|
||||
case r == '\\':
|
||||
lx.push(lexString)
|
||||
return lexStringEscape
|
||||
case r == stringEnd:
|
||||
case r == '"':
|
||||
lx.backup()
|
||||
lx.emit(itemString)
|
||||
lx.next()
|
||||
@@ -660,26 +700,33 @@ func lexString(lx *lexer) stateFn {
|
||||
func lexMultilineString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch r {
|
||||
default:
|
||||
return lexMultilineString
|
||||
case eof:
|
||||
return lx.errorf(`unexpected EOF; expected '"""'`)
|
||||
case '\r':
|
||||
if lx.peek() != '\n' {
|
||||
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
|
||||
}
|
||||
return lexMultilineString
|
||||
case '\\':
|
||||
return lexMultilineStringEscape
|
||||
case stringEnd:
|
||||
case '"':
|
||||
/// Found " → try to read two more "".
|
||||
if lx.accept(stringEnd) {
|
||||
if lx.accept(stringEnd) {
|
||||
if lx.accept('"') {
|
||||
if lx.accept('"') {
|
||||
/// Peek ahead: the string can contain " and "", including at the
|
||||
/// end: """str"""""
|
||||
/// 6 or more at the end, however, is an error.
|
||||
if lx.peek() == stringEnd {
|
||||
if lx.peek() == '"' {
|
||||
/// Check if we already lexed 5 's; if so we have 6 now, and
|
||||
/// that's just too many man!
|
||||
if strings.HasSuffix(lx.current(), `"""""`) {
|
||||
///
|
||||
/// Second check is for the edge case:
|
||||
///
|
||||
/// two quotes allowed.
|
||||
/// vv
|
||||
/// """lol \""""""
|
||||
/// ^^ ^^^---- closing three
|
||||
/// escaped
|
||||
///
|
||||
/// But ugly, but it works
|
||||
if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) {
|
||||
return lx.errorf(`unexpected '""""""'`)
|
||||
}
|
||||
lx.backup()
|
||||
@@ -699,12 +746,8 @@ func lexMultilineString(lx *lexer) stateFn {
|
||||
}
|
||||
lx.backup()
|
||||
}
|
||||
return lexMultilineString
|
||||
}
|
||||
|
||||
if isControl(r) {
|
||||
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
|
||||
}
|
||||
return lexMultilineString
|
||||
}
|
||||
|
||||
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
|
||||
@@ -712,20 +755,19 @@ func lexMultilineString(lx *lexer) stateFn {
|
||||
func lexRawString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
default:
|
||||
return lexRawString
|
||||
case r == eof:
|
||||
return lx.errorf(`unexpected EOF; expected "'"`)
|
||||
case isControl(r) || r == '\r':
|
||||
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
|
||||
case isNL(r):
|
||||
return lx.errorf("strings cannot contain newlines")
|
||||
case r == rawStringEnd:
|
||||
return lx.errorPrevLine(errLexStringNL{})
|
||||
case r == '\'':
|
||||
lx.backup()
|
||||
lx.emit(itemRawString)
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
return lexRawString
|
||||
}
|
||||
|
||||
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
|
||||
@@ -734,21 +776,18 @@ func lexRawString(lx *lexer) stateFn {
|
||||
func lexMultilineRawString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch r {
|
||||
default:
|
||||
return lexMultilineRawString
|
||||
case eof:
|
||||
return lx.errorf(`unexpected EOF; expected "'''"`)
|
||||
case '\r':
|
||||
if lx.peek() != '\n' {
|
||||
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
|
||||
}
|
||||
return lexMultilineRawString
|
||||
case rawStringEnd:
|
||||
case '\'':
|
||||
/// Found ' → try to read two more ''.
|
||||
if lx.accept(rawStringEnd) {
|
||||
if lx.accept(rawStringEnd) {
|
||||
if lx.accept('\'') {
|
||||
if lx.accept('\'') {
|
||||
/// Peek ahead: the string can contain ' and '', including at the
|
||||
/// end: '''str'''''
|
||||
/// 6 or more at the end, however, is an error.
|
||||
if lx.peek() == rawStringEnd {
|
||||
if lx.peek() == '\'' {
|
||||
/// Check if we already lexed 5 's; if so we have 6 now, and
|
||||
/// that's just too many man!
|
||||
if strings.HasSuffix(lx.current(), "'''''") {
|
||||
@@ -771,19 +810,14 @@ func lexMultilineRawString(lx *lexer) stateFn {
|
||||
}
|
||||
lx.backup()
|
||||
}
|
||||
return lexMultilineRawString
|
||||
}
|
||||
|
||||
if isControl(r) {
|
||||
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
|
||||
}
|
||||
return lexMultilineRawString
|
||||
}
|
||||
|
||||
// lexMultilineStringEscape consumes an escaped character. It assumes that the
|
||||
// preceding '\\' has already been consumed.
|
||||
func lexMultilineStringEscape(lx *lexer) stateFn {
|
||||
// Handle the special case first:
|
||||
if isNL(lx.next()) {
|
||||
if isNL(lx.next()) { /// \ escaping newline.
|
||||
return lexMultilineString
|
||||
}
|
||||
lx.backup()
|
||||
@@ -817,8 +851,7 @@ func lexStringEscape(lx *lexer) stateFn {
|
||||
case 'U':
|
||||
return lexLongUnicodeEscape
|
||||
}
|
||||
return lx.errorf("invalid escape character %q; only the following escape characters are allowed: "+
|
||||
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
|
||||
return lx.error(errLexEscape{r})
|
||||
}
|
||||
|
||||
func lexShortUnicodeEscape(lx *lexer) stateFn {
|
||||
@@ -1108,8 +1141,6 @@ func lexComment(lx *lexer) stateFn {
|
||||
lx.backup()
|
||||
lx.emit(itemText)
|
||||
return lx.pop()
|
||||
case isControl(r):
|
||||
return lx.errorf("control characters are not allowed inside comments: '0x%02x'", r)
|
||||
default:
|
||||
return lexComment
|
||||
}
|
||||
@@ -1121,52 +1152,6 @@ func lexSkip(lx *lexer, nextState stateFn) stateFn {
|
||||
return nextState
|
||||
}
|
||||
|
||||
// isWhitespace returns true if `r` is a whitespace character according
|
||||
// to the spec.
|
||||
func isWhitespace(r rune) bool {
|
||||
return r == '\t' || r == ' '
|
||||
}
|
||||
|
||||
func isNL(r rune) bool {
|
||||
return r == '\n' || r == '\r'
|
||||
}
|
||||
|
||||
// Control characters except \n, \t
|
||||
func isControl(r rune) bool {
|
||||
switch r {
|
||||
case '\t', '\r', '\n':
|
||||
return false
|
||||
default:
|
||||
return (r >= 0x00 && r <= 0x1f) || r == 0x7f
|
||||
}
|
||||
}
|
||||
|
||||
func isDigit(r rune) bool {
|
||||
return r >= '0' && r <= '9'
|
||||
}
|
||||
|
||||
func isHexadecimal(r rune) bool {
|
||||
return (r >= '0' && r <= '9') ||
|
||||
(r >= 'a' && r <= 'f') ||
|
||||
(r >= 'A' && r <= 'F')
|
||||
}
|
||||
|
||||
func isOctal(r rune) bool {
|
||||
return r >= '0' && r <= '7'
|
||||
}
|
||||
|
||||
func isBinary(r rune) bool {
|
||||
return r == '0' || r == '1'
|
||||
}
|
||||
|
||||
func isBareKeyChar(r rune) bool {
|
||||
return (r >= 'A' && r <= 'Z') ||
|
||||
(r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') ||
|
||||
r == '_' ||
|
||||
r == '-'
|
||||
}
|
||||
|
||||
func (s stateFn) String() string {
|
||||
name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name()
|
||||
if i := strings.LastIndexByte(name, '.'); i > -1 {
|
||||
@@ -1223,3 +1208,26 @@ func (itype itemType) String() string {
|
||||
func (item item) String() string {
|
||||
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
|
||||
}
|
||||
|
||||
func isWhitespace(r rune) bool { return r == '\t' || r == ' ' }
|
||||
func isNL(r rune) bool { return r == '\n' || r == '\r' }
|
||||
func isControl(r rune) bool { // Control characters except \t, \r, \n
|
||||
switch r {
|
||||
case '\t', '\r', '\n':
|
||||
return false
|
||||
default:
|
||||
return (r >= 0x00 && r <= 0x1f) || r == 0x7f
|
||||
}
|
||||
}
|
||||
func isDigit(r rune) bool { return r >= '0' && r <= '9' }
|
||||
func isBinary(r rune) bool { return r == '0' || r == '1' }
|
||||
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
|
||||
func isHexadecimal(r rune) bool {
|
||||
return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')
|
||||
}
|
||||
func isBareKeyChar(r rune) bool {
|
||||
return (r >= 'A' && r <= 'Z') ||
|
||||
(r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') ||
|
||||
r == '_' || r == '-'
|
||||
}
|
||||
|
||||
112
vendor/github.com/BurntSushi/toml/decode_meta.go → vendor/github.com/BurntSushi/toml/meta.go
generated
vendored
112
vendor/github.com/BurntSushi/toml/decode_meta.go → vendor/github.com/BurntSushi/toml/meta.go
generated
vendored
@@ -1,34 +1,40 @@
|
||||
package toml
|
||||
|
||||
import "strings"
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// MetaData allows access to meta information about TOML data that may not be
|
||||
// inferable via reflection. In particular, whether a key has been defined and
|
||||
// the TOML type of a key.
|
||||
// MetaData allows access to meta information about TOML data that's not
|
||||
// accessible otherwise.
|
||||
//
|
||||
// It allows checking if a key is defined in the TOML data, whether any keys
|
||||
// were undecoded, and the TOML type of a key.
|
||||
type MetaData struct {
|
||||
mapping map[string]interface{}
|
||||
types map[string]tomlType
|
||||
keys []Key
|
||||
decoded map[string]bool
|
||||
context Key // Used only during decoding.
|
||||
|
||||
keyInfo map[string]keyInfo
|
||||
mapping map[string]interface{}
|
||||
keys []Key
|
||||
decoded map[string]struct{}
|
||||
data []byte // Input file; for errors.
|
||||
}
|
||||
|
||||
// IsDefined reports if the key exists in the TOML data.
|
||||
//
|
||||
// The key should be specified hierarchically, for example to access the TOML
|
||||
// key "a.b.c" you would use:
|
||||
// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive.
|
||||
//
|
||||
// IsDefined("a", "b", "c")
|
||||
//
|
||||
// IsDefined will return false if an empty key given. Keys are case sensitive.
|
||||
// Returns false for an empty key.
|
||||
func (md *MetaData) IsDefined(key ...string) bool {
|
||||
if len(key) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
var hash map[string]interface{}
|
||||
var ok bool
|
||||
var hashOrVal interface{} = md.mapping
|
||||
var (
|
||||
hash map[string]interface{}
|
||||
ok bool
|
||||
hashOrVal interface{} = md.mapping
|
||||
)
|
||||
for _, k := range key {
|
||||
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
|
||||
return false
|
||||
@@ -45,51 +51,12 @@ func (md *MetaData) IsDefined(key ...string) bool {
|
||||
// Type will return the empty string if given an empty key or a key that does
|
||||
// not exist. Keys are case sensitive.
|
||||
func (md *MetaData) Type(key ...string) string {
|
||||
fullkey := strings.Join(key, ".")
|
||||
if typ, ok := md.types[fullkey]; ok {
|
||||
return typ.typeString()
|
||||
if ki, ok := md.keyInfo[Key(key).String()]; ok {
|
||||
return ki.tomlType.typeString()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
|
||||
// values of this type.
|
||||
type Key []string
|
||||
|
||||
func (k Key) String() string { return strings.Join(k, ".") }
|
||||
|
||||
func (k Key) maybeQuotedAll() string {
|
||||
var ss []string
|
||||
for i := range k {
|
||||
ss = append(ss, k.maybeQuoted(i))
|
||||
}
|
||||
return strings.Join(ss, ".")
|
||||
}
|
||||
|
||||
func (k Key) maybeQuoted(i int) string {
|
||||
if k[i] == "" {
|
||||
return `""`
|
||||
}
|
||||
quote := false
|
||||
for _, c := range k[i] {
|
||||
if !isBareKeyChar(c) {
|
||||
quote = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if quote {
|
||||
return `"` + quotedReplacer.Replace(k[i]) + `"`
|
||||
}
|
||||
return k[i]
|
||||
}
|
||||
|
||||
func (k Key) add(piece string) Key {
|
||||
newKey := make(Key, len(k)+1)
|
||||
copy(newKey, k)
|
||||
newKey[len(k)] = piece
|
||||
return newKey
|
||||
}
|
||||
|
||||
// Keys returns a slice of every key in the TOML data, including key groups.
|
||||
//
|
||||
// Each key is itself a slice, where the first element is the top of the
|
||||
@@ -115,9 +82,40 @@ func (md *MetaData) Keys() []Key {
|
||||
func (md *MetaData) Undecoded() []Key {
|
||||
undecoded := make([]Key, 0, len(md.keys))
|
||||
for _, key := range md.keys {
|
||||
if !md.decoded[key.String()] {
|
||||
if _, ok := md.decoded[key.String()]; !ok {
|
||||
undecoded = append(undecoded, key)
|
||||
}
|
||||
}
|
||||
return undecoded
|
||||
}
|
||||
|
||||
// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
|
||||
// values of this type.
|
||||
type Key []string
|
||||
|
||||
func (k Key) String() string {
|
||||
ss := make([]string, len(k))
|
||||
for i := range k {
|
||||
ss[i] = k.maybeQuoted(i)
|
||||
}
|
||||
return strings.Join(ss, ".")
|
||||
}
|
||||
|
||||
func (k Key) maybeQuoted(i int) string {
|
||||
if k[i] == "" {
|
||||
return `""`
|
||||
}
|
||||
for _, c := range k[i] {
|
||||
if !isBareKeyChar(c) {
|
||||
return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
|
||||
}
|
||||
}
|
||||
return k[i]
|
||||
}
|
||||
|
||||
func (k Key) add(piece string) Key {
|
||||
newKey := make(Key, len(k)+1)
|
||||
copy(newKey, k)
|
||||
newKey[len(k)] = piece
|
||||
return newKey
|
||||
}
|
||||
220
vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
220
vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
@@ -1,7 +1,6 @@
package toml

import (
    "errors"
    "fmt"
    "strconv"
    "strings"
@@ -12,35 +11,29 @@ import (
)

type parser struct {
    mapping map[string]interface{}
    types map[string]tomlType
    lx *lexer
    lx *lexer
    context Key // Full key for the current hash in scope.
    currentKey string // Base key name for everything except hashes.
    pos Position // Current position in the TOML file.

    ordered []Key // List of keys in the order that they appear in the TOML data.
    context Key // Full key for the current hash in scope.
    currentKey string // Base key name for everything except hashes.
    approxLine int // Rough approximation of line number
    implicits map[string]bool // Record implied keys (e.g. 'key.group.names').
    ordered []Key // List of keys in the order that they appear in the TOML data.

    keyInfo map[string]keyInfo // Map keyname → info about the TOML key.
    mapping map[string]interface{} // Map keyname → key value.
    implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
}

// ParseError is used when a file can't be parsed: for example invalid integer
// literals, duplicate keys, etc.
type ParseError struct {
    Message string
    Line int
    LastKey string
}

func (pe ParseError) Error() string {
    return fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
        pe.Line, pe.LastKey, pe.Message)
type keyInfo struct {
    pos Position
    tomlType tomlType
}

func parse(data string) (p *parser, err error) {
    defer func() {
        if r := recover(); r != nil {
            var ok bool
            if err, ok = r.(ParseError); ok {
            if pErr, ok := r.(ParseError); ok {
                pErr.input = data
                err = pErr
                return
            }
            panic(r)
@@ -60,16 +53,21 @@ func parse(data string) (p *parser, err error) {
    if len(data) < 6 {
        ex = len(data)
    }
    if strings.ContainsRune(data[:ex], 0) {
        return nil, errors.New("files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8")
    if i := strings.IndexRune(data[:ex], 0); i > -1 {
        return nil, ParseError{
            Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
            Position: Position{Line: 1, Start: i, Len: 1},
            Line: 1,
            input: data,
        }
    }

    p = &parser{
        keyInfo: make(map[string]keyInfo),
        mapping: make(map[string]interface{}),
        types: make(map[string]tomlType),
        lx: lex(data),
        ordered: make([]Key, 0),
        implicits: make(map[string]bool),
        implicits: make(map[string]struct{}),
    }
    for {
        item := p.next()
@@ -82,24 +80,57 @@ func parse(data string) (p *parser, err error) {
    return p, nil
}

func (p *parser) panicf(format string, v ...interface{}) {
    msg := fmt.Sprintf(format, v...)
func (p *parser) panicErr(it item, err error) {
    panic(ParseError{
        Message: msg,
        Line: p.approxLine,
        LastKey: p.current(),
        err: err,
        Position: it.pos,
        Line: it.pos.Len,
        LastKey: p.current(),
    })
}

func (p *parser) panicItemf(it item, format string, v ...interface{}) {
    panic(ParseError{
        Message: fmt.Sprintf(format, v...),
        Position: it.pos,
        Line: it.pos.Len,
        LastKey: p.current(),
    })
}

func (p *parser) panicf(format string, v ...interface{}) {
    panic(ParseError{
        Message: fmt.Sprintf(format, v...),
        Position: p.pos,
        Line: p.pos.Line,
        LastKey: p.current(),
    })
}

func (p *parser) next() item {
    it := p.lx.nextItem()
    //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val)
    //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val)
    if it.typ == itemError {
        p.panicf("%s", it.val)
        if it.err != nil {
            panic(ParseError{
                Position: it.pos,
                Line: it.pos.Line,
                LastKey: p.current(),
                err: it.err,
            })
        }

        p.panicItemf(it, "%s", it.val)
    }
    return it
}

func (p *parser) nextPos() item {
    it := p.next()
    p.pos = it.pos
    return it
}

func (p *parser) bug(format string, v ...interface{}) {
    panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
@@ -119,11 +150,9 @@ func (p *parser) assertEqual(expected, got itemType) {
func (p *parser) topLevel(item item) {
    switch item.typ {
    case itemCommentStart: // # ..
        p.approxLine = item.line
        p.expect(itemText)
    case itemTableStart: // [ .. ]
        name := p.next()
        p.approxLine = name.line
        name := p.nextPos()

        var key Key
        for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() {
@@ -132,11 +161,10 @@ func (p *parser) topLevel(item item) {
        p.assertEqual(itemTableEnd, name.typ)

        p.addContext(key, false)
        p.setType("", tomlHash)
        p.setType("", tomlHash, item.pos)
        p.ordered = append(p.ordered, key)
    case itemArrayTableStart: // [[ .. ]]
        name := p.next()
        p.approxLine = name.line
        name := p.nextPos()

        var key Key
        for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() {
@@ -145,13 +173,12 @@ func (p *parser) topLevel(item item) {
        p.assertEqual(itemArrayTableEnd, name.typ)

        p.addContext(key, true)
        p.setType("", tomlArrayHash)
        p.setType("", tomlArrayHash, item.pos)
        p.ordered = append(p.ordered, key)
    case itemKeyStart: // key = ..
        outerContext := p.context
        /// Read all the key parts (e.g. 'a' and 'b' in 'a.b')
        k := p.next()
        p.approxLine = k.line
        k := p.nextPos()
        var key Key
        for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
            key = append(key, p.keyString(k))
@@ -169,8 +196,9 @@ func (p *parser) topLevel(item item) {
        }

        /// Set value.
        val, typ := p.value(p.next(), false)
        p.set(p.currentKey, val, typ)
        vItem := p.next()
        val, typ := p.value(vItem, false)
        p.set(p.currentKey, val, typ, vItem.pos)
        p.ordered = append(p.ordered, p.context.add(p.currentKey))

        /// Remove the context we added (preserving any context from [tbl] lines).
@@ -206,9 +234,9 @@ var datetimeRepl = strings.NewReplacer(
func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
    switch it.typ {
    case itemString:
        return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
        return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
    case itemMultilineString:
        return p.replaceEscapes(stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
        return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
    case itemRawString:
        return it.val, p.typeOfPrimitive(it)
    case itemRawMultilineString:
@@ -240,10 +268,10 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {

func (p *parser) valueInteger(it item) (interface{}, tomlType) {
    if !numUnderscoresOK(it.val) {
        p.panicf("Invalid integer %q: underscores must be surrounded by digits", it.val)
        p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val)
    }
    if numHasLeadingZero(it.val) {
        p.panicf("Invalid integer %q: cannot have leading zeroes", it.val)
        p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val)
    }

    num, err := strconv.ParseInt(it.val, 0, 64)
@@ -254,7 +282,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) {
        // So mark the former as a bug but the latter as a legitimate user
        // error.
        if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
            p.panicf("Integer '%s' is out of the range of 64-bit signed integers.", it.val)
            p.panicErr(it, errParseRange{i: it.val, size: "int64"})
        } else {
            p.bug("Expected integer value, but got '%s'.", it.val)
        }
@@ -272,18 +300,18 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
    })
    for _, part := range parts {
        if !numUnderscoresOK(part) {
            p.panicf("Invalid float %q: underscores must be surrounded by digits", it.val)
            p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val)
        }
    }
    if len(parts) > 0 && numHasLeadingZero(parts[0]) {
        p.panicf("Invalid float %q: cannot have leading zeroes", it.val)
        p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val)
    }
    if !numPeriodsOK(it.val) {
        // As a special case, numbers like '123.' or '1.e2',
        // which are valid as far as Go/strconv are concerned,
        // must be rejected because TOML says that a fractional
        // part consists of '.' followed by 1+ digits.
        p.panicf("Invalid float %q: '.' must be followed by one or more digits", it.val)
        p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val)
    }
    val := strings.Replace(it.val, "_", "", -1)
    if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
@@ -292,9 +320,9 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
    num, err := strconv.ParseFloat(val, 64)
    if err != nil {
        if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
            p.panicf("Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val)
            p.panicErr(it, errParseRange{i: it.val, size: "float64"})
        } else {
            p.panicf("Invalid float value: %q", it.val)
            p.panicItemf(it, "Invalid float value: %q", it.val)
        }
    }
    return num, p.typeOfPrimitive(it)
@@ -325,18 +353,21 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
        }
    }
    if !ok {
        p.panicf("Invalid TOML Datetime: %q.", it.val)
        p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val)
    }
    return t, p.typeOfPrimitive(it)
}

func (p *parser) valueArray(it item) (interface{}, tomlType) {
    p.setType(p.currentKey, tomlArray)
    p.setType(p.currentKey, tomlArray, it.pos)

    // p.setType(p.currentKey, typ)
    var (
        array []interface{}
        types []tomlType

        // Initialize to a non-nil empty slice. This makes it consistent with
        // how S = [] decodes into a non-nil slice inside something like struct
        // { S []string }. See #338
        array = []interface{}{}
    )
    for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
        if it.typ == itemCommentStart {
@@ -347,6 +378,12 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) {
        val, typ := p.value(it, true)
        array = append(array, val)
        types = append(types, typ)

        // XXX: types isn't used here, we need it to record the accurate type
        // information.
        //
        // Not entirely sure how to best store this; could use "key[0]",
        // "key[1]" notation, or maybe store it on the Array type?
    }
    return array, tomlArray
}
@@ -373,8 +410,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
        }

        /// Read all key parts.
        k := p.next()
        p.approxLine = k.line
        k := p.nextPos()
        var key Key
        for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
            key = append(key, p.keyString(k))
@@ -393,7 +429,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom

        /// Set the value.
        val, typ := p.value(p.next(), false)
        p.set(p.currentKey, val, typ)
        p.set(p.currentKey, val, typ, it.pos)
        p.ordered = append(p.ordered, p.context.add(p.currentKey))
        hash[p.currentKey] = val

@@ -408,7 +444,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
// +/- signs, and base prefixes.
func numHasLeadingZero(s string) bool {
    if len(s) > 1 && s[0] == '0' && isDigit(rune(s[1])) { // >1 to allow "0" and isDigit to allow 0x
    if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x
        return true
    }
    if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' {
@@ -503,7 +539,7 @@ func (p *parser) addContext(key Key, array bool) {
        if hash, ok := hashContext[k].([]map[string]interface{}); ok {
            hashContext[k] = append(hash, make(map[string]interface{}))
        } else {
            p.panicf("Key '%s' was already created and cannot be used as an array.", keyContext)
            p.panicf("Key '%s' was already created and cannot be used as an array.", key)
        }
    } else {
        p.setValue(key[len(key)-1], make(map[string]interface{}))
@@ -512,9 +548,10 @@ func (p *parser) addContext(key Key, array bool) {
}

// set calls setValue and setType.
func (p *parser) set(key string, val interface{}, typ tomlType) {
    p.setValue(p.currentKey, val)
    p.setType(p.currentKey, typ)
func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) {
    p.setValue(key, val)
    p.setType(key, typ, pos)

}

// setValue sets the given key to the given value in the current context.
@@ -573,28 +610,32 @@ func (p *parser) setValue(key string, value interface{}) {
    hash[key] = value
}

// setType sets the type of a particular value at a given key.
// It should be called immediately AFTER setValue.
// setType sets the type of a particular value at a given key. It should be
// called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) {
func (p *parser) setType(key string, typ tomlType, pos Position) {
    keyContext := make(Key, 0, len(p.context)+1)
    for _, k := range p.context {
        keyContext = append(keyContext, k)
    }
    keyContext = append(keyContext, p.context...)
    if len(key) > 0 { // allow type setting for hashes
        keyContext = append(keyContext, key)
    }
    p.types[keyContext.String()] = typ
    // Special case to make empty keys ("" = 1) work.
    // Without it it will set "" rather than `""`.
    // TODO: why is this needed? And why is this only needed here?
    if len(keyContext) == 0 {
        keyContext = Key{""}
    }
    p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos}
}

// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = true }
func (p *parser) removeImplicit(key Key) { p.implicits[key.String()] = false }
func (p *parser) isImplicit(key Key) bool { return p.implicits[key.String()] }
func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray }
func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray }
func (p *parser) addImplicitContext(key Key) {
    p.addImplicit(key)
    p.addContext(key, false)
@@ -622,7 +663,7 @@ func stripFirstNewline(s string) string {
}

// Remove newlines inside triple-quoted strings if a line ends with "\".
func stripEscapedNewlines(s string) string {
func (p *parser) stripEscapedNewlines(s string) string {
    split := strings.Split(s, "\n")
    if len(split) < 1 {
        return s
@@ -654,6 +695,10 @@ func stripEscapedNewlines(s string) string {
            continue
        }

        if i == len(split)-1 {
            p.panicf("invalid escape: '\\ '")
        }

        split[i] = line[:len(line)-1] // Remove \
        if len(split)-1 > i {
            split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
@@ -662,8 +707,8 @@ func stripEscapedNewlines(s string) string {
    return strings.Join(split, "")
}

func (p *parser) replaceEscapes(str string) string {
    var replaced []rune
func (p *parser) replaceEscapes(it item, str string) string {
    replaced := make([]rune, 0, len(str))
    s := []byte(str)
    r := 0
    for r < len(s) {
@@ -681,10 +726,8 @@ func (p *parser) replaceEscapes(str string) string {
        switch s[r] {
        default:
            p.bug("Expected valid escape code after \\, but got %q.", s[r])
            return ""
        case ' ', '\t':
            p.panicf("invalid escape: '\\%c'", s[r])
            return ""
            p.panicItemf(it, "invalid escape: '\\%c'", s[r])
        case 'b':
            replaced = append(replaced, rune(0x0008))
            r += 1
@@ -710,14 +753,14 @@ func (p *parser) replaceEscapes(str string) string {
            // At this point, we know we have a Unicode escape of the form
            // `uXXXX` at [r, r+5). (Because the lexer guarantees this
            // for us.)
            escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
            escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
            replaced = append(replaced, escaped)
            r += 5
        case 'U':
            // At this point, we know we have a Unicode escape of the form
            // `uXXXX` at [r, r+9). (Because the lexer guarantees this
            // for us.)
            escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
            escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9])
            replaced = append(replaced, escaped)
            r += 9
        }
@@ -725,15 +768,14 @@ func (p *parser) replaceEscapes(str string) string {
    return string(replaced)
}

func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune {
    s := string(bs)
    hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
    if err != nil {
        p.bug("Could not parse '%s' as a hexadecimal number, but the "+
            "lexer claims it's OK: %s", s, err)
        p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
    }
    if !utf8.ValidRune(rune(hex)) {
        p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
        p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s)
    }
    return rune(hex)
}
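Editor's note: the diff above shows `parse()` converting internal panics that carry a `ParseError` into an ordinary returned error, and re-raising anything else. A minimal sketch of that panic-to-error pattern, using a hypothetical `parseError` stand-in rather than the package's actual type:

```go
package main

import "fmt"

// parseError is a stand-in for the package's ParseError type.
type parseError struct{ msg string }

func (e parseError) Error() string { return e.msg }

// parse recovers panics that carry a parseError and returns them as
// ordinary errors; any other panic is re-raised untouched.
func parse(data string) (result string, err error) {
	defer func() {
		if r := recover(); r != nil {
			if pErr, ok := r.(parseError); ok {
				err = pErr
				return
			}
			panic(r) // not ours: propagate
		}
	}()
	if data == "" {
		panic(parseError{msg: "empty input"}) // deep in real parsers: p.panicf(...)
	}
	return data, nil
}

func main() {
	if _, err := parse(""); err != nil {
		fmt.Println("parse failed:", err) // parse failed: empty input
	}
}
```

This is why helpers like `panicItemf` and `panicErr` can abort from arbitrarily deep in the parser without threading an error value through every call.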
4
vendor/github.com/BurntSushi/toml/type_fields.go
generated
vendored
@@ -70,8 +70,8 @@ func typeFields(t reflect.Type) []field {
    next := []field{{typ: t}}

    // Count of queued names for current level and the next.
    count := map[reflect.Type]int{}
    nextCount := map[reflect.Type]int{}
    var count map[reflect.Type]int
    var nextCount map[reflect.Type]int

    // Types already visited at an earlier level.
    visited := map[reflect.Type]bool{}

@@ -16,7 +16,7 @@ func typeEqual(t1, t2 tomlType) bool {
    return t1.typeString() == t2.typeString()
}

func typeIsHash(t tomlType) bool {
func typeIsTable(t tomlType) bool {
    return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}
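Editor's note: the `typeFields` change above replaces eagerly allocated maps with nil `var` declarations. That is safe because reading from a nil map in Go behaves as reading from an empty one; allocation can be deferred until the first write. A small self-contained illustration (not the library's code):

```go
package main

import "fmt"

func main() {
	// Reading from a nil map is safe in Go (it acts as empty), which is
	// why `var count map[reflect.Type]int` can postpone allocation.
	var count map[string]int
	fmt.Println(count["x"], len(count)) // 0 0

	count = map[string]int{} // allocate only when a write is needed
	count["x"]++
	fmt.Println(count["x"]) // 1
}
```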
27
vendor/github.com/Microsoft/go-winio/README.md
generated
vendored
@@ -11,12 +11,27 @@ package.

Please see the LICENSE file for licensing information.

This project has adopted the [Microsoft Open Source Code of
Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
see the [Code of Conduct
FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
questions or comments.
## Contributing

This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA)
declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com.

When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR
appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA.

We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves
or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can
attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off.

## Code of Conduct

This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

## Special Thanks
Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
for another named pipe implementation.
189
vendor/github.com/Microsoft/go-winio/backuptar/tar.go
generated
vendored
@@ -5,7 +5,6 @@ package backuptar
import (
    "archive/tar"
    "encoding/base64"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
@@ -42,19 +41,14 @@ const (
    hdrCreationTime = "LIBARCHIVE.creationtime"
)

func writeZeroes(w io.Writer, count int64) error {
    buf := make([]byte, 8192)
    c := len(buf)
    for i := int64(0); i < count; i += int64(c) {
        if int64(c) > count-i {
            c = int(count - i)
        }
        _, err := w.Write(buf[:c])
        if err != nil {
            return err
        }
// zeroReader is an io.Reader that always returns 0s.
type zeroReader struct{}

func (zr zeroReader) Read(b []byte) (int, error) {
    for i := range b {
        b[i] = 0
    }
    return nil
    return len(b), nil
}

func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
@@ -71,16 +65,26 @@ func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
            return fmt.Errorf("unexpected stream %d", bhdr.Id)
        }

        // We can't seek backwards, since we have already written that data to the tar.Writer.
        if bhdr.Offset < curOffset {
            return fmt.Errorf("cannot seek back from %d to %d", curOffset, bhdr.Offset)
        }
        // archive/tar does not support writing sparse files
        // so just write zeroes to catch up to the current offset.
        err = writeZeroes(t, bhdr.Offset-curOffset)
        if _, err := io.CopyN(t, zeroReader{}, bhdr.Offset-curOffset); err != nil {
            return fmt.Errorf("seek to offset %d: %s", bhdr.Offset, err)
        }
        if bhdr.Size == 0 {
            // A sparse block with size = 0 is used to mark the end of the sparse blocks.
            break
        }
        n, err := io.Copy(t, br)
        if err != nil {
            return err
        }
        if n != bhdr.Size {
            return fmt.Errorf("copied %d bytes instead of %d at offset %d", n, bhdr.Size, bhdr.Offset)
        }
        curOffset = bhdr.Offset + n
    }
    return nil
@@ -109,6 +113,69 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta
    return hdr
}

// SecurityDescriptorFromTarHeader reads the SDDL associated with the header of the current file
// from the tar header and returns the security descriptor into a byte slice.
func SecurityDescriptorFromTarHeader(hdr *tar.Header) ([]byte, error) {
    // Maintaining old SDDL-based behavior for backward
    // compatibility. All new tar headers written by this library
    // will have raw binary for the security descriptor.
    var sd []byte
    var err error
    if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
        sd, err = winio.SddlToSecurityDescriptor(sddl)
        if err != nil {
            return nil, err
        }
    }
    if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
        sd, err = base64.StdEncoding.DecodeString(sdraw)
        if err != nil {
            return nil, err
        }
    }
    return sd, nil
}

// ExtendedAttributesFromTarHeader reads the EAs associated with the header of the
// current file from the tar header and returns it as a byte slice.
func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) {
    var eas []winio.ExtendedAttribute
    var eadata []byte
    var err error
    for k, v := range hdr.PAXRecords {
        if !strings.HasPrefix(k, hdrEaPrefix) {
            continue
        }
        data, err := base64.StdEncoding.DecodeString(v)
        if err != nil {
            return nil, err
        }
        eas = append(eas, winio.ExtendedAttribute{
            Name: k[len(hdrEaPrefix):],
            Value: data,
        })
    }
    if len(eas) != 0 {
        eadata, err = winio.EncodeExtendedAttributes(eas)
        if err != nil {
            return nil, err
        }
    }
    return eadata, nil
}

// EncodeReparsePointFromTarHeader reads the ReparsePoint structure from the tar header
// and encodes it into a byte slice. The file for which this function is called must be a
// symlink.
func EncodeReparsePointFromTarHeader(hdr *tar.Header) []byte {
    _, isMountPoint := hdr.PAXRecords[hdrMountPoint]
    rp := winio.ReparsePoint{
        Target: filepath.FromSlash(hdr.Linkname),
        IsMountPoint: isMountPoint,
    }
    return winio.EncodeReparsePoint(&rp)
}

// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream.
//
// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
@@ -221,20 +288,44 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
        }
    }

    // The logic for copying file contents is fairly complicated due to the need for handling sparse files,
    // and the weird ways they are represented by BackupRead. A normal file will always either have a data stream
    // with size and content, or no data stream at all (if empty). However, for a sparse file, the content can also
    // be represented using a series of sparse block streams following the data stream. Additionally, the way sparse
    // files are handled by BackupRead has changed in the OS recently. The specifics of the representation are described
    // in the list at the bottom of this block comment.
    //
    // Sparse files can be represented in four different ways, based on the specifics of the file.
    // - Size = 0:
    //     Previously: BackupRead yields no data stream and no sparse block streams.
    //     Recently: BackupRead yields a data stream with size = 0. There are no following sparse block streams.
    // - Size > 0, no allocated ranges:
    //     BackupRead yields a data stream with size = 0. Following is a single sparse block stream with
    //     size = 0 and offset = <file size>.
    // - Size > 0, one allocated range:
    //     BackupRead yields a data stream with size = <file size> containing the file contents. There are no
    //     sparse block streams. This is the case if you take a normal file with contents and simply set the
    //     sparse flag on it.
    // - Size > 0, multiple allocated ranges:
    //     BackupRead yields a data stream with size = 0. Following are sparse block streams for each allocated
    //     range of the file containing the range contents. Finally there is a sparse block stream with
    //     size = 0 and offset = <file size>.

    if dataHdr != nil {
        // A data stream was found. Copy the data.
        if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 {
        // We assume that we will either have a data stream size > 0 XOR have sparse block streams.
        if dataHdr.Size > 0 || (dataHdr.Attributes&winio.StreamSparseAttributes) == 0 {
            if size != dataHdr.Size {
                return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
            }
            _, err = io.Copy(t, br)
            if err != nil {
                return err
            if _, err = io.Copy(t, br); err != nil {
                return fmt.Errorf("%s: copying contents from data stream: %s", name, err)
            }
        } else {
            err = copySparse(t, br)
            if err != nil {
                return err
        } else if size > 0 {
            // As of a recent OS change, BackupRead now returns a data stream for empty sparse files.
            // These files have no sparse block streams, so skip the copySparse call if file size = 0.
            if err = copySparse(t, br); err != nil {
                return fmt.Errorf("%s: copying contents from sparse block stream: %s", name, err)
            }
        }
    }
@@ -279,7 +370,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
            } else {
                // Unsupported for now, since the size of the alternate stream is not present
                // in the backup stream until after the data has been read.
                return errors.New("tar of sparse alternate data streams is unsupported")
                return fmt.Errorf("%s: tar of sparse alternate data streams is unsupported", name)
            }
        case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
            // ignore these streams
@@ -330,21 +421,10 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
// tar file that was not processed, or io.EOF if there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
    bw := winio.NewBackupStreamWriter(w)
    var sd []byte
    var err error
    // Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
    // by this library will have raw binary for the security descriptor.
    if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
        sd, err = winio.SddlToSecurityDescriptor(sddl)
        if err != nil {
            return nil, err
        }
    }
    if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
        sd, err = base64.StdEncoding.DecodeString(sdraw)
        if err != nil {
            return nil, err
        }

    sd, err := SecurityDescriptorFromTarHeader(hdr)
    if err != nil {
        return nil, err
    }
    if len(sd) != 0 {
        bhdr := winio.BackupHeader{
@@ -360,25 +440,12 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
            return nil, err
        }
    }
    var eas []winio.ExtendedAttribute
    for k, v := range hdr.PAXRecords {
        if !strings.HasPrefix(k, hdrEaPrefix) {
            continue
        }
        data, err := base64.StdEncoding.DecodeString(v)
        if err != nil {
            return nil, err
        }
        eas = append(eas, winio.ExtendedAttribute{
            Name: k[len(hdrEaPrefix):],
            Value: data,
        })

    eadata, err := ExtendedAttributesFromTarHeader(hdr)
    if err != nil {
        return nil, err
    }
    if len(eas) != 0 {
        eadata, err := winio.EncodeExtendedAttributes(eas)
        if err != nil {
            return nil, err
        }
    if len(eadata) != 0 {
        bhdr := winio.BackupHeader{
            Id: winio.BackupEaData,
            Size: int64(len(eadata)),
@@ -392,13 +459,9 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
            return nil, err
        }
    }

    if hdr.Typeflag == tar.TypeSymlink {
        _, isMountPoint := hdr.PAXRecords[hdrMountPoint]
        rp := winio.ReparsePoint{
            Target: filepath.FromSlash(hdr.Linkname),
            IsMountPoint: isMountPoint,
        }
        reparse := winio.EncodeReparsePoint(&rp)
        reparse := EncodeReparsePointFromTarHeader(hdr)
        bhdr := winio.BackupHeader{
            Id: winio.BackupReparseData,
            Size: int64(len(reparse)),
@@ -411,7 +474,9 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
        if err != nil {
            return nil, err
        }

    }

    if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
        bhdr := winio.BackupHeader{
            Id: winio.BackupData,
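Editor's note: the diff replaces the hand-rolled `writeZeroes` loop with `io.CopyN` draining a `zeroReader`. The sketch below shows the mechanism in isolation; `zeroReader` is copied from the patch above, everything else is illustrative:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// zeroReader mirrors the type added above: an io.Reader that yields 0s forever.
type zeroReader struct{}

func (zeroReader) Read(b []byte) (int, error) {
	for i := range b {
		b[i] = 0
	}
	return len(b), nil
}

func main() {
	var buf bytes.Buffer
	// io.CopyN pulls exactly 10 bytes from the endless zero stream; this is
	// the same trick copySparse uses to pad up to a sparse block's offset.
	if _, err := io.CopyN(&buf, zeroReader{}, 10); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len(), bytes.Count(buf.Bytes(), []byte{0})) // 10 10
}
```

Delegating the chunking to `io.CopyN` removes the off-by-one-prone buffer arithmetic the old loop carried.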
6
vendor/github.com/Microsoft/go-winio/file.go
generated
vendored
@@ -1,3 +1,4 @@
//go:build windows
// +build windows

package winio
@@ -143,6 +144,11 @@ func (f *win32File) Close() error {
    return nil
}

// IsClosed checks if the file has been closed
func (f *win32File) IsClosed() bool {
    return f.closing.isSet()
}

// prepareIo prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
func (f *win32File) prepareIo() (*ioOperation, error) {
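Editor's note: `IsClosed` above reads an internal atomic "closing" flag (`f.closing.isSet()` is a go-winio helper, not shown here). A minimal sketch of the same pattern using the standard library's `sync/atomic.Bool` (Go 1.19+), purely as an assumption about the underlying idea:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// file tracks a "closing" bit that any goroutine can check cheaply
// without taking a lock, mirroring the win32File.IsClosed pattern.
type file struct {
	closing atomic.Bool
}

func (f *file) Close() error {
	f.closing.Store(true)
	return nil
}

func (f *file) IsClosed() bool { return f.closing.Load() }

func main() {
	var f file
	fmt.Println(f.IsClosed()) // false
	f.Close()
	fmt.Println(f.IsClosed()) // true
}
```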
9
vendor/github.com/Microsoft/go-winio/go.mod
generated
vendored
@@ -1,9 +0,0 @@
module github.com/Microsoft/go-winio

go 1.12

require (
    github.com/pkg/errors v0.9.1
    github.com/sirupsen/logrus v1.7.0
    golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
)
14
vendor/github.com/Microsoft/go-winio/go.sum
generated
vendored
@@ -1,14 +0,0 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
17
vendor/github.com/Microsoft/go-winio/hvsock.go
generated
vendored
@@ -1,3 +1,4 @@
//go:build windows
// +build windows

package winio
@@ -252,15 +253,23 @@ func (conn *HvsockConn) Close() error {
    return conn.sock.Close()
}

func (conn *HvsockConn) IsClosed() bool {
    return conn.sock.IsClosed()
}

func (conn *HvsockConn) shutdown(how int) error {
    err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD)
    if conn.IsClosed() {
        return ErrFileClosed
    }

    err := syscall.Shutdown(conn.sock.handle, how)
    if err != nil {
        return os.NewSyscallError("shutdown", err)
    }
    return nil
}

// CloseRead shuts down the read end of the socket.
// CloseRead shuts down the read end of the socket, preventing future read operations.
func (conn *HvsockConn) CloseRead() error {
    err := conn.shutdown(syscall.SHUT_RD)
    if err != nil {
@@ -269,8 +278,8 @@ func (conn *HvsockConn) CloseRead() error {
    return nil
}

// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
// no more data will be written.
// CloseWrite shuts down the write end of the socket, preventing future write operations and
// notifying the other endpoint that no more data will be written.
func (conn *HvsockConn) CloseWrite() error {
    err := conn.shutdown(syscall.SHUT_WR)
    if err != nil {
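Editor's note: the fix above makes `shutdown` honor its `how` argument (previously it always shut down the read side). The half-close semantics that `CloseRead`/`CloseWrite` expose are the same as `net.TCPConn`'s; since Hyper-V sockets are Windows-only, the runnable sketch below demonstrates the behavior over TCP instead:

```go
package main

import (
	"fmt"
	"io"
	"net"
)

// After CloseWrite (shutdown(SHUT_WR)) the peer sees EOF, but the closing
// side can still read any data the peer sends back.
func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	go func() {
		c, _ := ln.Accept()
		defer c.Close()
		io.Copy(c, c) // echo until the client half-closes its write side
	}()

	c, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	c.Write([]byte("ping"))
	c.(*net.TCPConn).CloseWrite() // peer's read now reaches EOF

	reply, _ := io.ReadAll(c) // our read side is still open
	fmt.Printf("%s\n", reply) // ping
}
```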
9
vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
generated
vendored
@@ -14,8 +14,6 @@ import (
    "encoding/binary"
    "fmt"
    "strconv"

    "golang.org/x/sys/windows"
)

// Variant specifies which GUID variant (or "type") of the GUID. It determines
@@ -41,13 +39,6 @@ type Version uint8
var _ = (encoding.TextMarshaler)(GUID{})
var _ = (encoding.TextUnmarshaler)(&GUID{})

// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type so that stringification and
// marshaling can be supported. The representation matches that used by native
// Windows code.
type GUID windows.GUID

// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
func NewV4() (GUID, error) {
    var b [16]byte
15
vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
// +build !windows

package guid

// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type as that is only available to builds
// targeted at `windows`. The representation matches that used by native Windows
// code.
type GUID struct {
    Data1 uint32
    Data2 uint16
    Data3 uint16
    Data4 [8]byte
}
10
vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
package guid

import "golang.org/x/sys/windows"

// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type so that stringification and
// marshaling can be supported. The representation matches that used by native
// Windows code.
type GUID windows.GUID
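Editor's note: the two new files above split the `GUID` definition by build target: on Windows it is `type GUID windows.GUID`, elsewhere a structurally identical local struct. This works because Go permits direct conversion between types with identical underlying layouts. A small self-contained illustration (names are hypothetical):

```go
package main

import "fmt"

// winGUID plays the role of golang.org/x/sys/windows.GUID.
type winGUID struct {
	Data1 uint32
	Data2 uint16
	Data3 uint16
	Data4 [8]byte
}

// GUID is defined over the same layout, so conversions are free.
type GUID winGUID

func main() {
	w := winGUID{Data1: 0xdeadbeef}
	g := GUID(w)       // direct conversion: same memory layout
	back := winGUID(g) // and back again
	fmt.Printf("%x %x\n", g.Data1, back.Data1) // deadbeef deadbeef
}
```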
15
vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go
generated
vendored
@@ -3,11 +3,10 @@
package security

import (
    "fmt"
    "os"
    "syscall"
    "unsafe"

    "github.com/pkg/errors"
)

type (
@@ -72,7 +71,7 @@ func GrantVmGroupAccess(name string) error {
    // Stat (to determine if `name` is a directory).
    s, err := os.Stat(name)
    if err != nil {
        return errors.Wrapf(err, "%s os.Stat %s", gvmga, name)
        return fmt.Errorf("%s os.Stat %s: %w", gvmga, name, err)
    }

    // Get a handle to the file/directory. Must defer Close on success.
@@ -88,7 +87,7 @@ func GrantVmGroupAccess(name string) error {
    sd := uintptr(0)
    origDACL := uintptr(0)
    if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil {
        return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name)
        return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err)
    }
    defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd)))

@@ -102,7 +101,7 @@ func GrantVmGroupAccess(name string) error {

    // And finally use SetSecurityInfo to apply the updated DACL.
    if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil {
        return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name)
        return fmt.Errorf("%s SetSecurityInfo %s: %w", gvmga, name, err)
    }

    return nil
@@ -120,7 +119,7 @@ func createFile(name string, isDir bool) (syscall.Handle, error) {
    }
    fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0)
    if err != nil {
        return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name)
        return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err)
    }
    return fd, nil
}
@@ -131,7 +130,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp
    // Generate pointers to the SIDs based on the string SIDs
    sid, err := syscall.StringToSid(sidVmGroup)
    if err != nil {
        return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup)
        return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err)
    }

    inheritance := inheritModeNoInheritance
@@ -154,7 +153,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp

    modifiedDACL := uintptr(0)
    if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil {
        return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name)
        return 0, fmt.Errorf("%s SetEntriesInAcl %s: %w", gvmga, name, err)
    }

    return modifiedDACL, nil
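Editor's note: this file, like several below, swaps `errors.Wrapf` from github.com/pkg/errors for the standard library's `%w` verb. Both keep the cause inspectable; the change simply drops the external dependency. A minimal sketch (the `stat` helper is illustrative, not go-winio code):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// Wrapping with %w preserves the error chain, just as errors.Wrapf did.
func stat(name string) error {
	if _, err := os.Stat(name); err != nil {
		return fmt.Errorf("GrantVmGroupAccess: os.Stat %s: %w", name, err)
	}
	return nil
}

func main() {
	err := stat("/no/such/path")
	fmt.Println(err)
	// errors.Is sees through the %w wrapper to the root cause.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true
}
```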
6
vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go
generated
vendored
@@ -2,6 +2,6 @@ package security

//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go

//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) [failretval!=0] = advapi32.GetSecurityInfo
//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) [failretval!=0] = advapi32.SetSecurityInfo
//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) [failretval!=0] = advapi32.SetEntriesInAclW
//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) = advapi32.GetSecurityInfo
//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) = advapi32.SetSecurityInfo
//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) = advapi32.SetEntriesInAclW
24
vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go
generated
vendored
@@ -45,26 +45,26 @@ var (
    procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo")
)

func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) {
    r1, _, e1 := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0)
    if r1 != 0 {
        err = errnoErr(e1)
func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) {
    r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0)
    if r0 != 0 {
        win32err = syscall.Errno(r0)
    }
    return
}

func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) {
    r1, _, e1 := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0)
    if r1 != 0 {
        err = errnoErr(e1)
func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) {
    r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0)
    if r0 != 0 {
        win32err = syscall.Errno(r0)
    }
    return
}

func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) {
    r1, _, e1 := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0)
    if r1 != 0 {
        err = errnoErr(e1)
func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) {
    r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0)
    if r0 != 0 {
        win32err = syscall.Errno(r0)
    }
    return
}
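Editor's note: these advapi32 functions return their Win32 error code directly in the function result rather than via GetLastError, so the regenerated stubs convert a nonzero return value to `syscall.Errno` instead of calling the old `errnoErr(e1)` helper. A sketch of just that conversion (the `toWin32Err` helper and the constant are illustrative):

```go
//go:build windows

package main

import (
	"errors"
	"fmt"
	"syscall"
)

// toWin32Err mirrors the new stub logic: a nonzero return value IS the
// Win32 error code, converted directly to syscall.Errno.
func toWin32Err(r0 uintptr) error {
	if r0 != 0 {
		return syscall.Errno(r0)
	}
	return nil
}

func main() {
	const errorAccessDenied = 5 // winerror.h ERROR_ACCESS_DENIED (assumed value)
	err := toWin32Err(errorAccessDenied)
	fmt.Println(err)
	// syscall.Errno is comparable, so errors.Is matches by value.
	fmt.Println(errors.Is(err, syscall.Errno(5))) // true
}
```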
67
vendor/github.com/Microsoft/go-winio/vhd/vhd.go
generated
vendored
@@ -1,3 +1,4 @@
//go:build windows
// +build windows

package vhd
@@ -7,17 +8,16 @@ import (
    "syscall"

    "github.com/Microsoft/go-winio/pkg/guid"
    "github.com/pkg/errors"
    "golang.org/x/sys/windows"
)

//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go

//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.CreateVirtualDisk
//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.OpenVirtualDisk
//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) [failretval != 0] = virtdisk.AttachVirtualDisk
//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = virtdisk.DetachVirtualDisk
//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) [failretval != 0] = virtdisk.GetVirtualDiskPhysicalPath
//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk
//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk
//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) = virtdisk.AttachVirtualDisk
//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) = virtdisk.DetachVirtualDisk
//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) = virtdisk.GetVirtualDiskPhysicalPath

type (
    CreateVirtualDiskFlag uint32
@@ -62,13 +62,27 @@ type OpenVirtualDiskParameters struct {
    Version2 OpenVersion2
}

// The higher level `OpenVersion2` struct uses bools to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However,
// the internal windows structure uses `BOOLS` aka int32s for these types. `openVersion2` is used for translating
// `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods.
type openVersion2 struct {
    getInfoOnly int32
    readOnly int32
    resiliencyGUID guid.GUID
}

type openVirtualDiskParameters struct {
    version uint32
    version2 openVersion2
}

type AttachVersion2 struct {
    RestrictedOffset uint64
    RestrictedLength uint64
}

type AttachVirtualDiskParameters struct {
    Version uint32 // Must always be set to 2
    Version uint32
    Version2 AttachVersion2
}

@@ -146,16 +160,13 @@ func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
        return err
    }

    if err := syscall.CloseHandle(handle); err != nil {
        return err
    }
    return nil
    return syscall.CloseHandle(handle)
}

// DetachVirtualDisk detaches a virtual hard disk by handle.
func DetachVirtualDisk(handle syscall.Handle) (err error) {
    if err := detachVirtualDisk(handle, 0, 0); err != nil {
        return errors.Wrap(err, "failed to detach virtual disk")
        return fmt.Errorf("failed to detach virtual disk: %w", err)
    }
    return nil
}
@@ -185,7 +196,7 @@ func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtua
        parameters,
        nil,
    ); err != nil {
        return errors.Wrap(err, "failed to attach virtual disk")
        return fmt.Errorf("failed to attach virtual disk: %w", err)
    }
    return nil
}
@@ -209,7 +220,7 @@ func AttachVhd(path string) (err error) {
        AttachVirtualDiskFlagNone,
        &params,
    ); err != nil {
        return errors.Wrap(err, "failed to attach virtual disk")
        return fmt.Errorf("failed to attach virtual disk: %w", err)
    }
    return nil
}
@@ -234,19 +245,35 @@ func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask Virtual
    var (
        handle syscall.Handle
        defaultType VirtualStorageType
        getInfoOnly int32
        readOnly int32
    )
    if parameters.Version != 2 {
        return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version)
    }
    if parameters.Version2.GetInfoOnly {
        getInfoOnly = 1
    }
    if parameters.Version2.ReadOnly {
        readOnly = 1
    }
    params := &openVirtualDiskParameters{
        version: parameters.Version,
        version2: openVersion2{
            getInfoOnly,
            readOnly,
            parameters.Version2.ResiliencyGUID,
        },
    }
    if err := openVirtualDisk(
        &defaultType,
        vhdPath,
        uint32(virtualDiskAccessMask),
        uint32(openVirtualDiskFlags),
        parameters,
        params,
        &handle,
    ); err != nil {
        return 0, errors.Wrap(err, "failed to open virtual disk")
        return 0, fmt.Errorf("failed to open virtual disk: %w", err)
    }
    return handle, nil
}
@@ -272,7 +299,7 @@ func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask,
        nil,
        &handle,
    ); err != nil {
        return handle, errors.Wrap(err, "failed to create virtual disk")
        return handle, fmt.Errorf("failed to create virtual disk: %w", err)
    }
    return handle, nil
}
@@ -290,7 +317,7 @@ func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) {
        &diskPathSizeInBytes,
        &diskPhysicalPathBuf[0],
    ); err != nil {
        return "", errors.Wrap(err, "failed to get disk physical path")
        return "", fmt.Errorf("failed to get disk physical path: %w", err)
    }
    return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil
}
@@ -314,10 +341,10 @@ func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error
        createParams,
    )
    if err != nil {
        return fmt.Errorf("failed to create differencing vhd: %s", err)
        return fmt.Errorf("failed to create differencing vhd: %w", err)
    }
    if err := syscall.CloseHandle(vhdHandle); err != nil {
        return fmt.Errorf("failed to close differencing vhd handle: %s", err)
        return fmt.Errorf("failed to close differencing vhd handle: %w", err)
    }
    return nil
}
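Editor's note: the `openVersion2` change above translates the public struct's Go `bool` fields into the 32-bit `BOOL` ints the Win32 layout requires; the patch does this inline before the syscall. A minimal sketch of the translation, where `boolToBOOL` is a hypothetical helper, not part of the patch:

```go
package main

import "fmt"

// Public parameter struct with idiomatic Go bools.
type OpenVersion2 struct {
	GetInfoOnly bool
	ReadOnly    bool
}

// Internal mirror matching the Win32 layout, where BOOL is a 32-bit int.
type openVersion2 struct {
	getInfoOnly int32
	readOnly    int32
}

// boolToBOOL performs the bool -> BOOL conversion done inline in the patch.
func boolToBOOL(b bool) int32 {
	if b {
		return 1
	}
	return 0
}

func main() {
	pub := OpenVersion2{GetInfoOnly: true, ReadOnly: false}
	raw := openVersion2{
		getInfoOnly: boolToBOOL(pub.GetInfoOnly),
		readOnly:    boolToBOOL(pub.ReadOnly),
	}
	fmt.Printf("%+v\n", raw) // {getInfoOnly:1 readOnly:0}
}
```

Keeping the `BOOL` struct unexported means callers keep the ergonomic bool API while the syscall sees the exact memory layout Windows expects.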
52
vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go
generated
vendored
@@ -47,60 +47,60 @@ var (
 	procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk")
 )
 
-func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) {
-	r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)))
-	if r1 != 0 {
-		err = errnoErr(e1)
+func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) {
+	r0, _, _ := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)))
+	if r0 != 0 {
+		win32err = syscall.Errno(r0)
 	}
 	return
 }
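The regenerated stubs also change how failure is detected: the virtdisk APIs return their Win32 error code directly as the call's result, so a nonzero return value is converted with syscall.Errno instead of consulting GetLastError via errnoErr. A minimal sketch of that convention (checkWin32 is a hypothetical helper, not part of go-winio):

```go
package main

import (
	"fmt"
	"syscall"
)

// checkWin32 models the generated-stub pattern above: the DWORD the
// API returns is itself the error code, with zero meaning success.
func checkWin32(r0 uintptr) error {
	if r0 != 0 {
		return syscall.Errno(r0)
	}
	return nil
}

func main() {
	fmt.Println(checkWin32(0))  // <nil> — ERROR_SUCCESS
	fmt.Println(checkWin32(32)) // ERROR_SHARING_VIOLATION on Windows
}
```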
 
-func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) {
+func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) {
 	var _p0 *uint16
-	_p0, err = syscall.UTF16PtrFromString(path)
-	if err != nil {
+	_p0, win32err = syscall.UTF16PtrFromString(path)
+	if win32err != nil {
 		return
 	}
 	return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, createVirtualDiskFlags, providerSpecificFlags, parameters, overlapped, handle)
 }
 
-func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) {
-	r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle)))
-	if r1 != 0 {
-		err = errnoErr(e1)
+func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) {
+	r0, _, _ := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle)))
+	if r0 != 0 {
+		win32err = syscall.Errno(r0)
 	}
 	return
 }
 
-func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags))
-	if r1 != 0 {
-		err = errnoErr(e1)
+func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) {
+	r0, _, _ := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags))
+	if r0 != 0 {
+		win32err = syscall.Errno(r0)
 	}
 	return
 }
 
-func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer)))
-	if r1 != 0 {
-		err = errnoErr(e1)
+func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) {
+	r0, _, _ := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer)))
+	if r0 != 0 {
+		win32err = syscall.Errno(r0)
 	}
 	return
 }
 
-func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) {
+func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
 	var _p0 *uint16
-	_p0, err = syscall.UTF16PtrFromString(path)
-	if err != nil {
+	_p0, win32err = syscall.UTF16PtrFromString(path)
+	if win32err != nil {
 		return
 	}
 	return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle)
 }
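The string-taking wrappers can fail in only one place: syscall.UTF16PtrFromString rejects strings containing an interior NUL byte, which is why the wrapper returns early before reaching the actual syscall. A small Windows-only demonstration (the paths are illustrative):

```go
package main

import (
	"fmt"
	"syscall"
)

func main() {
	// A normal path converts cleanly to a NUL-terminated *uint16.
	if _, err := syscall.UTF16PtrFromString(`C:\disks\base.vhdx`); err != nil {
		fmt.Println("unexpected:", err)
	}
	// An embedded NUL cannot be represented in a NUL-terminated
	// string, so the conversion fails with EINVAL.
	_, err := syscall.UTF16PtrFromString("bad\x00path")
	fmt.Println(err) // invalid argument
}
```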
 
-func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) {
-	r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
-	if r1 != 0 {
-		err = errnoErr(e1)
+func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
+	r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
+	if r0 != 0 {
+		win32err = syscall.Errno(r0)
 	}
 	return
 }
Some files were not shown because too many files have changed in this diff.