Mirror of https://github.com/containers/skopeo.git, synced 2026-01-30 13:58:48 +00:00

Compare commits: release-1. ... release-1. (31 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 378e64f554 |  |
|  | 2b3c446078 |  |
|  | 0415db8318 |  |
|  | 30097f001e |  |
|  | 468f3a9e5b |  |
|  | 16cca3522d |  |
|  | ec2beb9181 |  |
|  | 8c924b825c |  |
|  | bd52afc66a |  |
|  | 34f0743c06 |  |
|  | 0c668ba522 |  |
|  | 7be3d4f37a |  |
|  | 69be2cbadc |  |
|  | 6153a02cef |  |
|  | 2b16a1ccfb |  |
|  | b0fbccc660 |  |
|  | 4f8c1a820e |  |
|  | c20c32dc25 |  |
|  | d2add6d523 |  |
|  | f95219501d |  |
|  | 2c2c2e71d8 |  |
|  | 4414e52e96 |  |
|  | 1987f916b1 |  |
|  | 433697232e |  |
|  | 1d50fad8d6 |  |
|  | 540efb3744 |  |
|  | 16c5bbadf7 |  |
|  | 875bb42594 |  |
|  | 1186cc6bce |  |
|  | 311f61f1aa |  |
|  | 796c9cc041 |  |
.cirrus.yml (94 changed lines)
@@ -24,13 +24,19 @@ env:
    #### Cache-image names to test with (double-quotes around names are critical)
    ####
    FEDORA_NAME: "fedora-36"
    PRIOR_FEDORA_NAME: "fedora-35"
    UBUNTU_NAME: "ubuntu-2110"

    # Google-cloud VM Images
    IMAGE_SUFFIX: "c5495735033528320"
    IMAGE_SUFFIX: "c4955393725038592"
    FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
    PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
    UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"

    # Container FQIN's
    FEDORA_CONTAINER_FQIN: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}"
    PRIOR_FEDORA_CONTAINER_FQIN: "quay.io/libpod/prior-fedora_podman:${IMAGE_SUFFIX}"
    UBUNTU_CONTAINER_FQIN: "quay.io/libpod/ubuntu_podman:${IMAGE_SUFFIX}"

    # Built along with the standard PR-based workflow in c/automation_images
    SKOPEO_CIDEV_CONTAINER_FQIN: "quay.io/libpod/skopeo_cidev:${IMAGE_SUFFIX}"
@@ -47,7 +53,7 @@ validate_task:
    # The git-validation tool doesn't work well on branch or tag push,
    # under Cirrus-CI, due to challenges obtaining the starting commit ID.
    # Only do validation for PRs.
    only_if: &is_pr $CIRRUS_PR != ''
    only_if: $CIRRUS_PR != ''
    container:
        image: '${SKOPEO_CIDEV_CONTAINER_FQIN}'
        cpu: 4
@@ -57,7 +63,7 @@ validate_task:
        make vendor && hack/tree_status.sh

doccheck_task:
    only_if: *is_pr
    only_if: $CIRRUS_PR != ''
    depends_on:
        - validate
    container:
@@ -74,35 +80,13 @@ doccheck_task:
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" build
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" doccheck

osx_task:
    # Run for regular PRs and those with [CI:BUILD] but not [CI:DOCS]
    only_if: &not_docs_multiarch >-
        $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
        $CIRRUS_CRON != 'multiarch'
    depends_on:
        - validate
    macos_instance:
        image: catalina-xcode
    setup_script: |
        export PATH=$GOPATH/bin:$PATH
        brew update
        brew install gpgme go go-md2man
        go install golang.org/x/lint/golint@latest
    test_script: |
        export PATH=$GOPATH/bin:$PATH
        go version
        go env
        make validate-local test-unit-local bin/skopeo
        sudo make install
        /usr/local/bin/skopeo -v


cross_task:
    alias: cross
    only_if: *not_docs_multiarch
    only_if: &not_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
    depends_on:
        - validate
    gce_instance: &standardvm
    gce_instance:
        image_project: libpod-218412
        zone: "us-central1-f"
        cpu: 2
@@ -126,11 +110,7 @@ cross_task:
#####
test_skopeo_task:
    alias: test_skopeo
    # Don't test for [CI:DOCS], [CI:BUILD], or 'multiarch' cron.
    only_if: >-
        $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' &&
        $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
        $CIRRUS_CRON != 'multiarch'
    only_if: *not_docs
    depends_on:
        - validate
    gce_instance:
@@ -163,49 +143,6 @@ test_skopeo_task:
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" system


image_build_task: &image-build
    name: "Build multi-arch $CTXDIR"
    alias: image_build
    # Some of these container images take > 1h to build, limit
    # this task to a specific Cirrus-Cron entry with this name.
    only_if: $CIRRUS_CRON == 'multiarch'
    timeout_in: 120m  # emulation is sssllllooooowwww
    gce_instance:
        <<: *standardvm
        image_name: build-push-${IMAGE_SUFFIX}
        # More muscle required for parallel multi-arch build
        type: "n2-standard-4"
    matrix:
        - env:
            CTXDIR: contrib/skopeoimage/upstream
        - env:
            CTXDIR: contrib/skopeoimage/testing
        - env:
            CTXDIR: contrib/skopeoimage/stable
    env:
        SKOPEO_USERNAME: ENCRYPTED[4195884d23b154553f2ddb26a63fc9fbca50ba77b3e447e4da685d8639ed9bc94b9a86a9c77272c8c80d32ead9ca48da]
        SKOPEO_PASSWORD: ENCRYPTED[36e06f9befd17e5da2d60260edb9ef0d40e6312e2bba4cf881d383f1b8b5a18c8e5a553aea2fdebf39cebc6bd3b3f9de]
        CONTAINERS_USERNAME: ENCRYPTED[dd722c734641f103b394a3a834d51ca5415347e378637cf98ee1f99e64aad2ec3dbd4664c0d94cb0e06b83d89e9bbe91]
        CONTAINERS_PASSWORD: ENCRYPTED[d8b0fac87fe251cedd26c864ba800480f9e0570440b9eb264265b67411b253a626fb69d519e188e6c9a7f525860ddb26]
    main_script:
        - source /etc/automation_environment
        - main.sh $CIRRUS_REPO_CLONE_URL $CTXDIR


test_image_build_task:
    <<: *image-build
    alias: test_image_build
    # Allow this to run inside a PR w/ [CI:BUILD] only.
    only_if: $CIRRUS_PR != '' && $CIRRUS_CHANGE_TITLE =~ '.*CI:BUILD.*'
    # This takes a LONG time, only run when requested.  N/B: Any task
    # made to depend on this one will block FOREVER unless triggered.
    # DO NOT ADD THIS TASK AS DEPENDENCY FOR `success_task`.
    trigger_type: manual
    # Overwrite all 'env', don't push anything, just do the build.
    env:
        DRYRUN: 1


# This task is critical.  It updates the "last-used by" timestamp stored
# in metadata for all VM images.  This mechanism functions in tandem with
# an out-of-band pruning operation to remove disused VM images.
@@ -218,9 +155,10 @@ meta_task:
        image: quay.io/libpod/imgts:latest
    env:
        # Space-separated list of images used by this repository state
        IMGNAMES: |
        IMGNAMES: >-
            ${FEDORA_CACHE_IMAGE_NAME}
            build-push-${IMAGE_SUFFIX}
            ${PRIOR_FEDORA_CACHE_IMAGE_NAME}
            ${UBUNTU_CACHE_IMAGE_NAME}
        BUILDID: "${CIRRUS_BUILD_ID}"
        REPOREF: "${CIRRUS_REPO_NAME}"
        GCPJSON: ENCRYPTED[6867b5a83e960e7c159a98fe6c8360064567a071c6f4b5e7d532283ecd870aa65c94ccd74bdaa9bf7aadac9d42e20a67]
@@ -240,10 +178,8 @@ success_task:
    depends_on:
        - validate
        - doccheck
        - osx
        - cross
        - test_skopeo
        - image_build
        - meta
    container: *smallcontainer
    env:
.github/workflows/check_cirrus_cron.yml (vendored, 109 changed lines)
@@ -1,17 +1,102 @@
---

# See also:
# https://github.com/containers/podman/blob/main/.github/workflows/check_cirrus_cron.yml

# Format Ref: https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions

# Required to un-FUBAR default ${{github.workflow}} value
name: check_cirrus_cron

on:
  # Note: This only applies to the default branch.
  schedule:
    # N/B: This should correspond to a period slightly after
    # the last job finishes running.  See job defs. at:
    # https://cirrus-ci.com/settings/repository/6706677464432640
    - cron: '59 23 * * 1-5'
  # Debug: Allow triggering job manually in github-actions WebUI
  workflow_dispatch: {}
  # Note: This only applies to the default branch.
  schedule:
    # N/B: This should correspond to a period slightly after
    # the last job finishes running.  See job defs. at:
    # https://cirrus-ci.com/settings/repository/6706677464432640
    - cron: '59 23 * * 1-5'
  # Debug: Allow triggering job manually in github-actions WebUI
  workflow_dispatch: {}

env:
  # Debug-mode can reveal secrets, only enable by a secret value.
  # Ref: https://help.github.com/en/actions/configuring-and-managing-workflows/managing-a-workflow-run#enabling-step-debug-logging
  ACTIONS_STEP_DEBUG: '${{ secrets.ACTIONS_STEP_DEBUG }}'
  # CSV listing of e-mail addresses for delivery failure or error notices
  RCPTCSV: rh.container.bot@gmail.com,podman-monitor@lists.podman.io
  # Filename for table of cron-name to build-id data
  # (must be in $GITHUB_WORKSPACE/artifacts/)
  NAME_ID_FILEPATH: './artifacts/name_id.txt'

jobs:
  # Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
  call_cron_failures:
    uses: containers/buildah/.github/workflows/check_cirrus_cron.yml@main
    secrets: inherit
  cron_failures:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          persist-credentials: false

      # Avoid duplicating cron_failures.sh in skopeo repo.
      - uses: actions/checkout@v2
        with:
          repository: containers/podman
          path: '_podman'
          persist-credentials: false

      - name: Get failed cron names and Build IDs
        id: cron
        run: './_podman/.github/actions/${{ github.workflow }}/${{ github.job }}.sh'

      - if: steps.cron.outputs.failures > 0
        shell: bash
        # Must be inline, since context expressions are used.
        # Ref: https://docs.github.com/en/free-pro-team@latest/actions/reference/context-and-expression-syntax-for-github-actions
        run: |
          set -eo pipefail
          (
            echo "Detected one or more Cirrus-CI cron-triggered jobs have failed recently:"
            echo ""

            while read -r NAME BID; do
              echo "Cron build '$NAME' Failed: https://cirrus-ci.com/build/$BID"
            done < "$NAME_ID_FILEPATH"

            echo ""
            echo "# Source: ${{ github.workflow }} workflow on ${{ github.repository }}."
            # Separate content from sendgrid.com automatic footer.
            echo ""
            echo ""
          ) > ./artifacts/email_body.txt

      - if: steps.cron.outputs.failures > 0
        name: Send failure notification e-mail
        # Ref: https://github.com/dawidd6/action-send-mail
        uses: dawidd6/action-send-mail@v2.2.2
        with:
          server_address: ${{secrets.ACTION_MAIL_SERVER}}
          server_port: 465
          username: ${{secrets.ACTION_MAIL_USERNAME}}
          password: ${{secrets.ACTION_MAIL_PASSWORD}}
          subject: Cirrus-CI cron build failures on ${{github.repository}}
          to: ${{env.RCPTCSV}}
          from: ${{secrets.ACTION_MAIL_SENDER}}
          body: file://./artifacts/email_body.txt

      - if: always()
        uses: actions/upload-artifact@v2
        with:
          name: ${{ github.job }}_artifacts
          path: artifacts/*

      - if: failure()
        name: Send error notification e-mail
        uses: dawidd6/action-send-mail@v2.2.2
        with:
          server_address: ${{secrets.ACTION_MAIL_SERVER}}
          server_port: 465
          username: ${{secrets.ACTION_MAIL_USERNAME}}
          password: ${{secrets.ACTION_MAIL_PASSWORD}}
          subject: Github workflow error on ${{github.repository}}
          to: ${{env.RCPTCSV}}
          from: ${{secrets.ACTION_MAIL_SENDER}}
          body: "Job failed: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}"
.github/workflows/multi-arch-build.yaml (vendored, new file, 209 lines)
@@ -0,0 +1,209 @@
---

# Please see contrib/<reponame>image/README.md for details on the intentions
# of this workflow.
#
# BIG FAT WARNING: This workflow is duplicated across containers/skopeo,
#                  containers/buildah, and containers/podman.  ANY AND
#                  ALL CHANGES MADE HERE MUST BE MANUALLY DUPLICATED
#                  TO THE OTHER REPOS.

name: build multi-arch images

on:
  # Upstream tends to be very active, with many merges per day.
  # Only run this daily via cron schedule, or manually, not by branch push.
  schedule:
    - cron: '0 8 * * *'
  # allows to run this workflow manually from the Actions tab
  workflow_dispatch:

jobs:
  multi:
    name: multi-arch image build
    env:
      REPONAME: skopeo  # No easy way to parse this out of $GITHUB_REPOSITORY
      # Server/namespace value used to format FQIN
      REPONAME_QUAY_REGISTRY: quay.io/skopeo
      CONTAINERS_QUAY_REGISTRY: quay.io/containers
      # list of architectures for build
      PLATFORMS: linux/amd64,linux/s390x,linux/ppc64le,linux/arm64
      # Command to execute in container to obtain project version number
      VERSION_CMD: "--version"  # skopeo is the entrypoint

    # build several images (upstream, testing, stable) in parallel
    strategy:
      # By default, failure of one matrix item cancels all others
      fail-fast: false
      matrix:
        # Builds are located under contrib/<reponame>image/<source> directory
        source:
          - upstream
          - testing
          - stable
    runs-on: ubuntu-latest
    # internal registry caches build for inspection before push
    services:
      registry:
        image: quay.io/libpod/registry:2
        ports:
          - 5000:5000
    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          driver-opts: network=host
          install: true

      - name: Build and locally push image
        uses: docker/build-push-action@v2
        with:
          context: contrib/${{ env.REPONAME }}image/${{ matrix.source }}
          file: ./contrib/${{ env.REPONAME }}image/${{ matrix.source }}/Dockerfile
          platforms: ${{ env.PLATFORMS }}
          push: true
          tags: localhost:5000/${{ env.REPONAME }}/${{ matrix.source }}

      # Simple verification that stable images work, and
      # also grab version number use in forming the FQIN.
      - name: amd64 container sniff test
        if: matrix.source == 'stable'
        id: sniff_test
        run: |
          podman pull --tls-verify=false \
            localhost:5000/$REPONAME/${{ matrix.source }}
          VERSION_OUTPUT=$(podman run \
            localhost:5000/$REPONAME/${{ matrix.source }} \
            $VERSION_CMD)
          echo "$VERSION_OUTPUT"
          VERSION=$(awk -r -e "/^${REPONAME} version /"'{print $3}' <<<"$VERSION_OUTPUT")
          test -n "$VERSION"
          echo "::set-output name=version::$VERSION"

      - name: Generate image FQIN(s) to push
        id: reponame_reg
        run: |
          if [[ "${{ matrix.source }}" == 'stable' ]]; then
            # The command version in image just built
            VERSION='v${{ steps.sniff_test.outputs.version }}'
            # workaround vim syntax-highlight bug: '
            # Push both new|updated version-tag and latest-tag FQINs
            FQIN="$REPONAME_QUAY_REGISTRY/stable:$VERSION,$REPONAME_QUAY_REGISTRY/stable:latest"
          elif [[ "${{ matrix.source }}" == 'testing' ]]; then
            # Assume some contents changed, always push latest testing.
            FQIN="$REPONAME_QUAY_REGISTRY/testing:latest"
          elif [[ "${{ matrix.source }}" == 'upstream' ]]; then
            # Assume some contents changed, always push latest upstream.
            FQIN="$REPONAME_QUAY_REGISTRY/upstream:latest"
          else
            echo "::error::Unknown matrix item '${{ matrix.source }}'"
            exit 1
          fi
          echo "::warning::Pushing $FQIN"
          echo "::set-output name=fqin::${FQIN}"
          echo '::set-output name=push::true'

      # This is substantially similar to the above logic,
      # but only handles $CONTAINERS_QUAY_REGISTRY for
      # the stable "latest" and named-version tagged images.
      - name: Generate containers reg. image FQIN(s)
        if: matrix.source == 'stable'
        id: containers_reg
        run: |
          VERSION='v${{ steps.sniff_test.outputs.version }}'
          # workaround vim syntax-highlight bug: '
          # Push both new|updated version-tag and latest-tag FQINs
          FQIN="$CONTAINERS_QUAY_REGISTRY/$REPONAME:$VERSION,$CONTAINERS_QUAY_REGISTRY/$REPONAME:latest"
          echo "::warning::Pushing $FQIN"
          echo "::set-output name=fqin::${FQIN}"
          echo '::set-output name=push::true'

      - name: Define LABELS multi-line env. var. value
        run: |
          # This is a really hacky/strange workflow idiom, required
          # for setting multi-line $LABELS value for consumption in
          # a future step.  There is literally no cleaner way to do this :<
          # https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#multiline-strings
          function set_labels() {
            echo 'LABELS<<DELIMITER' >> "$GITHUB_ENV"
            for line; do
              echo "$line" | tee -a "$GITHUB_ENV"
            done
            echo "DELIMITER" >> "$GITHUB_ENV"
          }

          declare -a lines
          lines=(\
            "org.opencontainers.image.source=https://github.com/${GITHUB_REPOSITORY}.git"
            "org.opencontainers.image.revision=${GITHUB_SHA}"
            "org.opencontainers.image.created=$(date -u --iso-8601=seconds)"
          )

          # Only the 'stable' matrix source obtains $VERSION
          if [[ "${{ matrix.source }}" == "stable" ]]; then
            lines+=(\
              "org.opencontainers.image.version=${{ steps.sniff_test.outputs.version }}"
            )
          fi

          set_labels "${lines[@]}"

      # Separate steps to login and push for $REPONAME_QUAY_REGISTRY and
      # $CONTAINERS_QUAY_REGISTRY are required, because 2 sets of credentials
      # are used and namespaced within the registry.  At the same time, reuse
      # of non-shell steps is not supported by Github Actions nor are YAML
      # anchors/aliases, nor composite actions.

      # Push to $REPONAME_QUAY_REGISTRY for stable, testing. and upstream
      - name: Login to ${{ env.REPONAME_QUAY_REGISTRY }}
        uses: docker/login-action@v1
        if: steps.reponame_reg.outputs.push == 'true'
        with:
          registry: ${{ env.REPONAME_QUAY_REGISTRY }}
          # N/B: Secrets are not passed to workflows that are triggered
          #      by a pull request from a fork
          username: ${{ secrets.REPONAME_QUAY_USERNAME }}
          password: ${{ secrets.REPONAME_QUAY_PASSWORD }}

      - name: Push images to ${{ steps.reponame_reg.outputs.fqin }}
        uses: docker/build-push-action@v2
        if: steps.reponame_reg.outputs.push == 'true'
        with:
          cache-from: type=registry,ref=localhost:5000/${{ env.REPONAME }}/${{ matrix.source }}
          cache-to: type=inline
          context: contrib/${{ env.REPONAME }}image/${{ matrix.source }}
          file: ./contrib/${{ env.REPONAME }}image/${{ matrix.source }}/Dockerfile
          platforms: ${{ env.PLATFORMS }}
          push: true
          tags: ${{ steps.reponame_reg.outputs.fqin }}
          labels: |
            ${{ env.LABELS }}

      # Push to $CONTAINERS_QUAY_REGISTRY only stable
      - name: Login to ${{ env.CONTAINERS_QUAY_REGISTRY }}
        if: steps.containers_reg.outputs.push == 'true'
        uses: docker/login-action@v1
        with:
          registry: ${{ env.CONTAINERS_QUAY_REGISTRY}}
          username: ${{ secrets.CONTAINERS_QUAY_USERNAME }}
          password: ${{ secrets.CONTAINERS_QUAY_PASSWORD }}

      - name: Push images to ${{ steps.containers_reg.outputs.fqin }}
        if: steps.containers_reg.outputs.push == 'true'
        uses: docker/build-push-action@v2
        with:
          cache-from: type=registry,ref=localhost:5000/${{ env.REPONAME }}/${{ matrix.source }}
          cache-to: type=inline
          context: contrib/${{ env.REPONAME }}image/${{ matrix.source }}
          file: ./contrib/${{ env.REPONAME }}image/${{ matrix.source }}/Dockerfile
          platforms: ${{ env.PLATFORMS }}
          push: true
          tags: ${{ steps.containers_reg.outputs.fqin }}
          labels: |
            ${{ env.LABELS }}
.github/workflows/stale.yml (vendored, 10 changed lines)
@@ -7,17 +7,13 @@ on:
  schedule:
    - cron: "0 0 * * *"

permissions:
  contents: read

jobs:
  stale:
    permissions:
      issues: write  # for actions/stale to close stale issues
      pull-requests: write  # for actions/stale to close stale PRs

    runs-on: ubuntu-latest

    steps:
      - uses: actions/stale@98ed4cb500039dbcccf4bd9bedada4d0187f2757 # v3
      - uses: actions/stale@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-issue-message: 'A friendly reminder that this issue had no activity for 30 days.'
.gitignore (vendored, 2 changed lines)
@@ -2,7 +2,7 @@
/layers-*
/skopeo
result
/completions/

# ignore JetBrains IDEs (GoLand) config folder
.idea
@@ -149,7 +149,7 @@ When new PRs for [containers/image](https://github.com/containers/image) break `
## Communications

For general questions, or discussions, please use the
IRC channel on `irc.libera.chat` called `#container-projects`
IRC group on `irc.freenode.net` called `container-projects`
that has been setup.

For discussions around issues/bugs and features, you can use the github
Makefile (70 changed lines)
@@ -2,22 +2,23 @@

export GOPROXY=https://proxy.golang.org

# On some platforms (eg. macOS, FreeBSD) gpgme is installed in /usr/local/ but /usr/local/include/ is
# not in the default search path. Rather than hard-code this directory, use gpgme-config.
# Sadly that must be done at the top-level user instead of locally in the gpgme subpackage, because cgo
# supports only pkg-config, not general shell scripts, and gpgme does not install a pkg-config file.
# If gpgme is not installed or gpgme-config can’t be found for other reasons, the error is silently ignored
# (and the user will probably find out because the cgo compilation will fail).
GPGME_ENV := CGO_CFLAGS="$(shell gpgme-config --cflags 2>/dev/null)" CGO_LDFLAGS="$(shell gpgme-config --libs 2>/dev/null)"

# The following variables very roughly follow https://www.gnu.org/prep/standards/standards.html#Makefile-Conventions .
DESTDIR ?=
PREFIX ?= /usr/local
ifeq ($(shell uname -s),FreeBSD)
CONTAINERSCONFDIR ?= /usr/local/etc/containers
else
CONTAINERSCONFDIR ?= /etc/containers
endif
REGISTRIESDDIR ?= ${CONTAINERSCONFDIR}/registries.d
LOOKASIDEDIR ?= /var/lib/containers/sigstore
SIGSTOREDIR ?= /var/lib/containers/sigstore
BINDIR ?= ${PREFIX}/bin
MANDIR ?= ${PREFIX}/share/man

BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
ZSHINSTALLDIR=${PREFIX}/share/zsh/site-functions
FISHINSTALLDIR=${PREFIX}/share/fish/vendor_completions.d
BASHCOMPLETIONSDIR ?= ${PREFIX}/share/bash-completion/completions

GO ?= go
GOBIN := $(shell $(GO) env GOBIN)
@@ -28,15 +29,10 @@ ifeq ($(GOBIN),)
GOBIN := $(GOPATH)/bin
endif

# Scripts may also use CONTAINER_RUNTIME, so we need to export it.
# Note possibly non-obvious aspects of this:
# - We need to use 'command -v' here, not 'which', for compatibility with MacOS.
# - GNU Make 4.2.1 (included in Ubuntu 20.04) incorrectly tries to avoid invoking
#   a shell, and fails because there is no /usr/bin/command. The trailing ';' in
#   $(shell … ;) defeats that heuristic (recommended in
#   https://savannah.gnu.org/bugs/index.php?57625 ).
export CONTAINER_RUNTIME ?= $(if $(shell command -v podman ;),podman,docker)
GOMD2MAN ?= $(if $(shell command -v go-md2man ;),go-md2man,$(GOBIN)/go-md2man)
# Multiple scripts are sensitive to this value, make sure it's exported/available
# N/B: Need to use 'command -v' here for compatibility with MacOS.
export CONTAINER_RUNTIME ?= $(if $(shell command -v podman),podman,docker)
GOMD2MAN ?= $(if $(shell command -v go-md2man),go-md2man,$(GOBIN)/go-md2man)

# Go module support: set `-mod=vendor` to use the vendored sources.
# See also hack/make.sh.
@@ -55,6 +51,8 @@ ifeq ($(GOOS), linux)
endif
endif

GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)

# If $TESTFLAGS is set, it is passed as extra arguments to 'go test'.
# You can increase test output verbosity with the option '-test.vv'.
# You can select certain tests to run, with `-test.run <regex>` for example:
@@ -90,7 +88,7 @@ endif
CONTAINER_GOSRC = /src/github.com/containers/skopeo
CONTAINER_RUN ?= $(CONTAINER_CMD) --security-opt label=disable -v $(CURDIR):$(CONTAINER_GOSRC) -w $(CONTAINER_GOSRC) $(SKOPEO_CIDEV_CONTAINER_FQIN)

GIT_COMMIT := $(shell GIT_CEILING_DIRECTORIES=$$(cd ..; pwd) git rev-parse HEAD 2> /dev/null || true)
GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)

EXTRA_LDFLAGS ?=
SKOPEO_LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} $(EXTRA_LDFLAGS)'
@@ -139,7 +137,7 @@ binary: cmd/skopeo
# Build w/o using containers
.PHONY: bin/skopeo
bin/skopeo:
    $(GO) build $(MOD_VENDOR) ${GO_DYN_FLAGS} ${SKOPEO_LDFLAGS} -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o $@ ./cmd/skopeo
    $(GPGME_ENV) $(GO) build $(MOD_VENDOR) ${GO_DYN_FLAGS} ${SKOPEO_LDFLAGS} -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o $@ ./cmd/skopeo
bin/skopeo.%:
    GOOS=$(word 2,$(subst ., ,$@)) GOARCH=$(word 3,$(subst ., ,$@)) $(GO) build $(MOD_VENDOR) ${SKOPEO_LDFLAGS} -tags "containers_image_openpgp $(BUILDTAGS)" -o $@ ./cmd/skopeo
local-cross: bin/skopeo.darwin.amd64 bin/skopeo.linux.arm bin/skopeo.linux.arm64 bin/skopeo.windows.386.exe bin/skopeo.windows.amd64.exe
@@ -154,19 +152,11 @@ docs: $(MANPAGES)
docs-in-container:
    ${CONTAINER_RUN} $(MAKE) docs $(if $(DEBUG),DEBUG=$(DEBUG))

.PHONY: completions
completions: bin/skopeo
    install -d -m 755 completions/bash completions/zsh completions/fish completions/powershell
    ./bin/skopeo completion bash >| completions/bash/skopeo
    ./bin/skopeo completion zsh >| completions/zsh/_skopeo
    ./bin/skopeo completion fish >| completions/fish/skopeo.fish
    ./bin/skopeo completion powershell >| completions/powershell/skopeo.ps1

clean:
    rm -rf bin docs/*.1 completions/
    rm -rf bin docs/*.1

install: install-binary install-docs install-completions
    install -d -m 755 ${DESTDIR}${LOOKASIDEDIR}
    install -d -m 755 ${DESTDIR}${SIGSTOREDIR}
    install -d -m 755 ${DESTDIR}${CONTAINERSCONFDIR}
    install -m 644 default-policy.json ${DESTDIR}${CONTAINERSCONFDIR}/policy.json
    install -d -m 755 ${DESTDIR}${REGISTRIESDDIR}
@@ -182,14 +172,9 @@ ifneq ($(DISABLE_DOCS), 1)
    install -m 644 docs/*.1 ${DESTDIR}${MANDIR}/man1
endif

install-completions: completions
    install -d -m 755 ${DESTDIR}${BASHINSTALLDIR}
    install -m 644 completions/bash/skopeo ${DESTDIR}${BASHINSTALLDIR}
    install -d -m 755 ${DESTDIR}${ZSHINSTALLDIR}
    install -m 644 completions/zsh/_skopeo ${DESTDIR}${ZSHINSTALLDIR}
    install -d -m 755 ${DESTDIR}${FISHINSTALLDIR}
    install -m 644 completions/fish/skopeo.fish ${DESTDIR}${FISHINSTALLDIR}
    # There is no common location for powershell files so do not install them. Users have to source the file from their powershell profile.
install-completions:
    install -m 755 -d ${DESTDIR}${BASHCOMPLETIONSDIR}
    install -m 644 completions/bash/skopeo ${DESTDIR}${BASHCOMPLETIONSDIR}/skopeo

shell:
    $(CONTAINER_RUN) bash
@@ -242,7 +227,7 @@ validate-docs:
    hack/xref-helpmsgs-manpages

test-unit-local: bin/skopeo
    $(GO) test $(MOD_VENDOR) -tags "$(BUILDTAGS)" $$($(GO) list $(MOD_VENDOR) -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/containers/skopeo/\(integration\|vendor/.*\)$$')
    $(GPGME_ENV) $(GO) test $(MOD_VENDOR) -tags "$(BUILDTAGS)" $$($(GO) list $(MOD_VENDOR) -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/containers/skopeo/\(integration\|vendor/.*\)$$')

vendor:
    $(GO) mod tidy
@@ -250,9 +235,4 @@ vendor:
    $(GO) mod verify

vendor-in-container:
    podman run --privileged --rm --env HOME=/root -v $(CURDIR):/src -w /src golang $(MAKE) vendor

# CAUTION: This is not a replacement for RPMs provided by your distro.
# Only intended to build and test the latest unreleased changes.
rpm:
    rpkg local
    podman run --privileged --rm --env HOME=/root -v $(CURDIR):/src -w /src quay.io/libpod/golang:1.16 $(MAKE) vendor
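The GPGME_ENV comment near the top of the Makefile captures a real cgo limitation: `#cgo pkg-config:` directives only work when the C library ships a .pc file, and gpgme does not, so the flags must be injected through CGO_CFLAGS/CGO_LDFLAGS from `gpgme-config`. A minimal cgo sketch of the kind of code that depends on those flags (illustrative only, not skopeo's actual gpgme binding; `gpgme_check_version` is the standard gpgme C entry point):

```go
// Package gpgmever is a tiny cgo consumer of gpgme, shown only to illustrate
// why CGO_CFLAGS/CGO_LDFLAGS must locate the library when no .pc file exists.
package gpgmever

// #cgo LDFLAGS: -lgpgme
// #include <gpgme.h>
import "C"

// Version reports the runtime gpgme library version, e.g. "1.18.0".
// Passing nil skips the minimum-version check and just returns the string.
func Version() string {
	return C.GoString(C.gpgme_check_version(nil))
}
```

On macOS or FreeBSD, building such a package would need something like `CGO_CFLAGS="$(gpgme-config --cflags)" CGO_LDFLAGS="$(gpgme-config --libs)" go build`, which is exactly what prefixing `$(GPGME_ENV)` onto the `go build` and `go test` recipes above accomplishes.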
@@ -207,7 +207,7 @@ Please read the [contribution guide](CONTRIBUTING.md) if you want to collaborate
| [skopeo-manifest-digest(1)](/docs/skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
| [skopeo-standalone-sign(1)](/docs/skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
| [skopeo-standalone-verify(1)](/docs/skopeo-standalone-verify.1.md)| Verify an image signature. |
| [skopeo-sync(1)](/docs/skopeo-sync.1.md) | Synchronize images between registry repositories and local directories. |
| [skopeo-sync(1)](/docs/skopeo-sync.1.md) | Synchronize images between container registries and local directories. |

License
-------
cmd/skopeo/cgo_pthread_ordering_workaround.go (new file, 35 lines)
@@ -0,0 +1,35 @@
//go:build !containers_image_openpgp
// +build !containers_image_openpgp

package main

/*
This is a pretty horrible workaround. Due to a glibc bug
https://bugzilla.redhat.com/show_bug.cgi?id=1326903 , we must ensure we link
with -lgpgme before -lpthread. Such arguments come from various packages
using cgo, and the ordering of these arguments is, with current (go tool link),
dependent on the order in which the cgo-using packages are found in a
breadth-first search following dependencies, starting from “main”.

Thus, if
    import "net"
is processed before
    import "…/skopeo/signature"
it will, in the next level of the BFS, pull in "runtime/cgo" (a dependency of
"net") before "mtrmac/gpgme" (a dependency of "…/skopeo/signature"), causing
-lpthread (used by "runtime/cgo") to be used before -lgpgme.

This might be possible to work around by careful import ordering, or by removing
a direct dependency on "net", but that would be very fragile.

So, until the above bug is fixed, add -lgpgme directly in the "main" package
to ensure the needed build order.

Unfortunately, this workaround needs to be applied at the top level of any user
of "…/skopeo/signature"; it cannot be added to "…/skopeo/signature" itself,
by that time this package is first processed by the linker, a -lpthread may
already be queued and it would be too late.
*/

// #cgo LDFLAGS: -lgpgme
import "C"
@@ -1,16 +0,0 @@
package main

import (
    "github.com/containers/image/v5/transports"
    "github.com/spf13/cobra"
)

// autocompleteSupportedTransports list all supported transports with the colon suffix.
func autocompleteSupportedTransports(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
    tps := transports.ListNames()
    suggestions := make([]string, 0, len(tps))
    for _, tp := range tps {
        suggestions = append(suggestions, tp+":")
    }
    return suggestions, cobra.ShellCompDirectiveNoFileComp
}
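The helper above (present on only one side of this compare) feeds cobra's dynamic shell completion: each transport name is offered with a trailing colon, and `ShellCompDirectiveNoFileComp` suppresses the default filename fallback. A self-contained sketch of the same wiring, using a hypothetical `demo` command with a hard-coded transport list (skopeo derives the real list from `transports.ListNames()` as shown above):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// completeTransports is a stand-in for autocompleteSupportedTransports above.
func completeTransports(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	// Offering "name:" lets the shell leave the cursor right after the colon,
	// ready for the repository part of the image reference.
	return []string{"docker:", "oci:", "dir:"}, cobra.ShellCompDirectiveNoFileComp
}

func main() {
	cmd := &cobra.Command{
		Use:               "demo IMAGE",
		ValidArgsFunction: completeTransports, // cobra calls this for positional-arg completion
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println(args)
			return nil
		},
	}
	_ = cmd.Execute()
}
```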
@@ -4,7 +4,7 @@ import (
    "errors"
    "fmt"
    "io"
    "os"
    "io/ioutil"
    "strings"

    commonFlag "github.com/containers/common/pkg/flag"
@@ -21,26 +21,24 @@ import (
)

type copyOptions struct {
    global                   *globalOptions
    deprecatedTLSVerify      *deprecatedTLSVerifyOption
    srcImage                 *imageOptions
    destImage                *imageDestOptions
    retryOpts                *retry.Options
    additionalTags           []string                  // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
    removeSignatures         bool                      // Do not copy signatures from the source image
    signByFingerprint        string                    // Sign the image using a GPG key with the specified fingerprint
    signBySigstorePrivateKey string                    // Sign the image using a sigstore private key
    signPassphraseFile       string                    // Path pointing to a passphrase file when signing (for either signature format, but only one of them)
    signIdentity             string                    // Identity of the signed image, must be a fully specified docker reference
    digestFile               string                    // Write digest to this file
    format                   commonFlag.OptionalString // Force conversion of the image to a specified format
    quiet                    bool                      // Suppress output information when copying images
    all                      bool                      // Copy all of the images if the source is a list
    multiArch                commonFlag.OptionalString // How to handle multi architecture images
    preserveDigests          bool                      // Preserve digests during copy
    encryptLayer             []int                     // The list of layers to encrypt
    encryptionKeys           []string                  // Keys needed to encrypt the image
    decryptionKeys           []string                  // Keys needed to decrypt the image
    global              *globalOptions
    deprecatedTLSVerify *deprecatedTLSVerifyOption
    srcImage            *imageOptions
    destImage           *imageDestOptions
    retryOpts           *retry.RetryOptions
    additionalTags      []string                  // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
    removeSignatures    bool                      // Do not copy signatures from the source image
    signByFingerprint   string                    // Sign the image using a GPG key with the specified fingerprint
    signPassphraseFile  string                    // Path pointing to a passphrase file when signing
    digestFile          string                    // Write digest to this file
    format              commonFlag.OptionalString // Force conversion of the image to a specified format
    quiet               bool                      // Suppress output information when copying images
    all                 bool                      // Copy all of the images if the source is a list
    multiArch           commonFlag.OptionalString // How to handle multi architecture images
    preserveDigests     bool                      // Preserve digests during copy
    encryptLayer        []int                     // The list of layers to encrypt
    encryptionKeys      []string                  // Keys needed to encrypt the image
    decryptionKeys      []string                  // Keys needed to decrypt the image
}

func copyCmd(global *globalOptions) *cobra.Command {
@@ -65,9 +63,8 @@ Supported transports:

See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
    RunE:              commandAction(opts.run),
    Example:           `skopeo copy docker://quay.io/skopeo/stable:latest docker://registry.example.com/skopeo:latest`,
    ValidArgsFunction: autocompleteSupportedTransports,
    RunE:    commandAction(opts.run),
    Example: `skopeo copy docker://quay.io/skopeo/stable:latest docker://registry.example.com/skopeo:latest`,
}
adjustUsage(cmd)
flags := cmd.Flags()
@@ -83,9 +80,7 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
flags.BoolVar(&opts.preserveDigests, "preserve-digests", false, "Preserve digests of images and lists")
flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE-IMAGE")
flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
flags.StringVar(&opts.signBySigstorePrivateKey, "sign-by-sigstore-private-key", "", "Sign the image using a sigstore private key at `PATH`")
flags.StringVar(&opts.signPassphraseFile, "sign-passphrase-file", "", "Read a passphrase for signing an image from `PATH`")
flags.StringVar(&opts.signIdentity, "sign-identity", "", "Identity of signed image, must be a fully specified docker reference. Defaults to the target docker reference.")
flags.StringVar(&opts.signPassphraseFile, "sign-passphrase-file", "", "File that contains a passphrase for the --sign-by key")
flags.StringVar(&opts.digestFile, "digestfile", "", "Write the digest of the pushed image to the specified file")
flags.VarP(commonFlag.NewOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use in the destination (default is manifest type of source, with fallbacks)`)
flags.StringSliceVar(&opts.encryptionKeys, "encryption-key", []string{}, "*Experimental* key with the encryption protocol to use needed to encrypt the image (e.g. jwe:/path/to/key.pem)")
@@ -115,7 +110,7 @@ func parseMultiArch(multiArch string) (copy.ImageListSelection, error) {
    }
}

func (opts *copyOptions) run(args []string, stdout io.Writer) (retErr error) {
func (opts *copyOptions) run(args []string, stdout io.Writer) error {
    if len(args) != 2 {
        return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
    }
@@ -130,11 +125,7 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) (retErr error) {
    if err != nil {
        return fmt.Errorf("Error loading trust policy: %v", err)
    }
    defer func() {
        if err := policyContext.Destroy(); err != nil {
            retErr = noteCloseFailure(retErr, "tearing down policy context", err)
        }
    }()
    defer policyContext.Destroy()

    srcRef, err := alltransports.ParseImageName(imageNames[0])
    if err != nil {
@@ -231,54 +222,25 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) (retErr error) {
        decConfig = cc.DecryptConfig
    }

    // c/image/copy.Image does allow creating both simple signing and sigstore signatures simultaneously,
    // with independent passphrases, but that would make the CLI probably too confusing.
    // For now, use the passphrase with either, but only one of them.
    if opts.signPassphraseFile != "" && opts.signByFingerprint != "" && opts.signBySigstorePrivateKey != "" {
        return fmt.Errorf("Only one of --sign-by and sign-by-sigstore-private-key can be used with sign-passphrase-file")
    }
    var passphrase string
    if opts.signPassphraseFile != "" {
        p, err := cli.ReadPassphraseFile(opts.signPassphraseFile)
        if err != nil {
            return err
        }
        passphrase = p
    } else if opts.signBySigstorePrivateKey != "" {
        p, err := promptForPassphrase(opts.signBySigstorePrivateKey, os.Stdin, os.Stdout)
        if err != nil {
            return err
        }
        passphrase = p
    } // opts.signByFingerprint triggers a GPG-agent passphrase prompt, possibly using a more secure channel, so we usually shouldn’t prompt ourselves if no passphrase was explicitly provided.

    var signIdentity reference.Named = nil
    if opts.signIdentity != "" {
        signIdentity, err = reference.ParseNamed(opts.signIdentity)
        if err != nil {
            return fmt.Errorf("Could not parse --sign-identity: %v", err)
        }
    passphrase, err := cli.ReadPassphraseFile(opts.signPassphraseFile)
    if err != nil {
        return err
    }

    opts.destImage.warnAboutIneffectiveOptions(destRef.Transport())

    return retry.IfNecessary(ctx, func() error {
    return retry.RetryIfNecessary(ctx, func() error {
        manifestBytes, err := copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
            RemoveSignatures:                 opts.removeSignatures,
            SignBy:                           opts.signByFingerprint,
            SignPassphrase:                   passphrase,
            SignBySigstorePrivateKeyFile:     opts.signBySigstorePrivateKey,
            SignSigstorePrivateKeyPassphrase: []byte(passphrase),
            SignIdentity:                     signIdentity,
            ReportWriter:                     stdout,
            SourceCtx:                        sourceCtx,
            DestinationCtx:                   destinationCtx,
            ForceManifestMIMEType:            manifestType,
            ImageListSelection:               imageListSelection,
            PreserveDigests:                  opts.preserveDigests,
            OciDecryptConfig:                 decConfig,
            OciEncryptLayers:                 encLayers,
            OciEncryptConfig:                 encConfig,
            RemoveSignatures:      opts.removeSignatures,
            SignBy:                opts.signByFingerprint,
            SignPassphrase:        passphrase,
            ReportWriter:          stdout,
            SourceCtx:             sourceCtx,
            DestinationCtx:        destinationCtx,
            ForceManifestMIMEType: manifestType,
            ImageListSelection:    imageListSelection,
            PreserveDigests:       opts.preserveDigests,
            OciDecryptConfig:      decConfig,
            OciEncryptLayers:      encLayers,
            OciEncryptConfig:      encConfig,
        })
        if err != nil {
            return err
@@ -288,7 +250,7 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) (retErr error) {
    if err != nil {
        return err
    }
    if err = os.WriteFile(opts.digestFile, []byte(manifestDigest.String()), 0644); err != nil {
    if err = ioutil.WriteFile(opts.digestFile, []byte(manifestDigest.String()), 0644); err != nil {
        return fmt.Errorf("Failed to write digest to file %q: %w", opts.digestFile, err)
    }
}
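Several hunks in this compare (the policyContext teardown above, plus the src/dest Close handling in inspect.go and layers.go below) trade a fire-and-forget `defer x.Close()` for a deferred closure that folds the cleanup error into a named return value through a `noteCloseFailure` helper. A sketch of the underlying pattern, with an assumed helper body (skopeo's actual message formatting may differ):

```go
package main

import (
	"fmt"
	"os"
)

// noteCloseFailure combines a primary error with a cleanup failure so that
// neither is silently dropped. The exact wording here is an assumption.
func noteCloseFailure(err error, description string, closeErr error) error {
	if err == nil {
		return fmt.Errorf("%s: %w", description, closeErr)
	}
	return fmt.Errorf("%w (%s: %v)", err, description, closeErr)
}

// writeData uses a named return (retErr) so the deferred cleanup can report
// a failed Close instead of discarding it.
func writeData(path string, data []byte) (retErr error) {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer func() {
		if err := f.Close(); err != nil {
			retErr = noteCloseFailure(retErr, "closing "+path, err)
		}
	}()
	_, err = f.Write(data)
	return err
}

func main() {
	fmt.Println(writeData(os.TempDir()+"/example.txt", []byte("hello\n")))
}
```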
@@ -15,7 +15,7 @@ import (
type deleteOptions struct {
    global    *globalOptions
    image     *imageOptions
    retryOpts *retry.Options
    retryOpts *retry.RetryOptions
}

func deleteCmd(global *globalOptions) *cobra.Command {
@@ -35,9 +35,8 @@ Supported transports:
%s
See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
    RunE:              commandAction(opts.run),
    Example:           `skopeo delete docker://registry.example.com/example/pause:latest`,
    ValidArgsFunction: autocompleteSupportedTransports,
    RunE:    commandAction(opts.run),
    Example: `skopeo delete docker://registry.example.com/example/pause:latest`,
}
adjustUsage(cmd)
flags := cmd.Flags()
@@ -70,7 +69,7 @@ func (opts *deleteOptions) run(args []string, stdout io.Writer) error {
ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()

return retry.IfNecessary(ctx, func() error {
return retry.RetryIfNecessary(ctx, func() error {
    return ref.DeleteImage(ctx, sys)
}, opts.retryOpts)
}
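The `retry.Options`/`retry.RetryOptions` and `retry.IfNecessary`/`retry.RetryIfNecessary` pairs that recur through these Go hunks reflect a rename in the containers/common retry package; the two compared branches simply vendor different generations of it. A minimal sketch of the newer API (the field set shown is a subset; which errors actually get retried is decided by the package's retryability classification):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/containers/common/pkg/retry"
)

func main() {
	ctx := context.Background()
	opts := &retry.Options{
		MaxRetry: 3,               // retry a failing operation up to 3 more times
		Delay:    2 * time.Second, // initial delay between attempts
	}
	attempt := 0
	err := retry.IfNecessary(ctx, func() error {
		attempt++
		fmt.Println("attempt", attempt)
		// Only errors the package classifies as retryable (e.g. network
		// timeouts, HTTP 5xx from a registry) trigger another attempt.
		return fmt.Errorf("transient failure")
	}, opts)
	fmt.Println("final:", err)
}
```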
@@ -2,7 +2,6 @@ package main

import (
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "os"
@@ -19,6 +18,7 @@ import (
    "github.com/containers/image/v5/types"
    "github.com/containers/skopeo/cmd/skopeo/inspect"
    v1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
)
@@ -26,7 +26,7 @@ import (
type inspectOptions struct {
    global    *globalOptions
    image     *imageOptions
    retryOpts *retry.Options
    retryOpts *retry.RetryOptions
    format    string
    raw       bool // Output the raw manifest instead of parsing information about the image
    config    bool // Output the raw config blob instead of parsing information about the image
@@ -55,7 +55,6 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
    Example: `skopeo inspect docker://registry.fedoraproject.org/fedora
  skopeo inspect --config docker://docker.io/alpine
  skopeo inspect --format "Name: {{.Name}} Digest: {{.Digest}}" docker://registry.access.redhat.com/ubi8`,
    ValidArgsFunction: autocompleteSupportedTransports,
}
adjustUsage(cmd)
flags := cmd.Flags()
@@ -96,30 +95,30 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
    return err
}

if err := retry.IfNecessary(ctx, func() error {
if err := retry.RetryIfNecessary(ctx, func() error {
    src, err = parseImageSource(ctx, opts.image, imageName)
    return err
}, opts.retryOpts); err != nil {
    return fmt.Errorf("Error parsing image name %q: %w", imageName, err)
    return errors.Wrapf(err, "Error parsing image name %q", imageName)
}

defer func() {
    if err := src.Close(); err != nil {
        retErr = noteCloseFailure(retErr, "closing image", err)
        retErr = errors.Wrapf(retErr, fmt.Sprintf("(could not close image: %v) ", err))
    }
}()

if err := retry.IfNecessary(ctx, func() error {
if err := retry.RetryIfNecessary(ctx, func() error {
    rawManifest, _, err = src.GetManifest(ctx, nil)
    return err
}, opts.retryOpts); err != nil {
    return fmt.Errorf("Error retrieving manifest for image: %w", err)
    return errors.Wrapf(err, "Error retrieving manifest for image")
}

if opts.raw && !opts.config {
    _, err := stdout.Write(rawManifest)
    if err != nil {
        return fmt.Errorf("Error writing manifest to standard output: %w", err)
        return fmt.Errorf("Error writing manifest to standard output: %v", err)
    }

    return nil
@@ -127,29 +126,29 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)

img, err := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(src, nil))
if err != nil {
    return fmt.Errorf("Error parsing manifest for image: %w", err)
    return errors.Wrapf(err, "Error parsing manifest for image")
}

if opts.config && opts.raw {
    var configBlob []byte
    if err := retry.IfNecessary(ctx, func() error {
    if err := retry.RetryIfNecessary(ctx, func() error {
        configBlob, err = img.ConfigBlob(ctx)
        return err
    }, opts.retryOpts); err != nil {
        return fmt.Errorf("Error reading configuration blob: %w", err)
        return errors.Wrapf(err, "Error reading configuration blob")
    }
    _, err = stdout.Write(configBlob)
    if err != nil {
        return fmt.Errorf("Error writing configuration blob to standard output: %w", err)
        return errors.Wrapf(err, "Error writing configuration blob to standard output")
    }
    return nil
} else if opts.config {
    var config *v1.Image
    if err := retry.IfNecessary(ctx, func() error {
    if err := retry.RetryIfNecessary(ctx, func() error {
        config, err = img.OCIConfig(ctx)
        return err
    }, opts.retryOpts); err != nil {
        return fmt.Errorf("Error reading OCI-formatted configuration data: %w", err)
        return errors.Wrapf(err, "Error reading OCI-formatted configuration data")
    }
    if report.IsJSON(opts.format) || opts.format == "" {
        var out []byte
@@ -163,12 +162,12 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
        err = printTmpl(row, data)
    }
    if err != nil {
        return fmt.Errorf("Error writing OCI-formatted configuration data to standard output: %w", err)
        return errors.Wrapf(err, "Error writing OCI-formatted configuration data to standard output")
    }
    return nil
}

if err := retry.IfNecessary(ctx, func() error {
if err := retry.RetryIfNecessary(ctx, func() error {
    imgInspect, err = img.Inspect(ctx)
    return err
}, opts.retryOpts); err != nil {
@@ -186,12 +185,11 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
    Architecture: imgInspect.Architecture,
    Os:           imgInspect.Os,
    Layers:       imgInspect.Layers,
    LayersData:   imgInspect.LayersData,
    Env:          imgInspect.Env,
}
outputData.Digest, err = manifest.Digest(rawManifest)
if err != nil {
    return fmt.Errorf("Error computing manifest digest: %w", err)
    return errors.Wrapf(err, "Error computing manifest digest")
}
if dockerRef := img.Reference().DockerReference(); dockerRef != nil {
    outputData.Name = dockerRef.Name()
@@ -209,7 +207,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
// In addition, AWS ECR rejects it with 403 (Forbidden) if the "ecr:ListImages"
// action is not allowed.
if !strings.Contains(err.Error(), "401") && !strings.Contains(err.Error(), "403") {
    return fmt.Errorf("Error determining repository tags: %w", err)
    return errors.Wrapf(err, "Error determining repository tags")
}
logrus.Warnf("Registry disallows tag list retrieval; skipping")
}
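The other recurring change in inspect.go is error wrapping: `errors.Wrapf(err, ...)` from github.com/pkg/errors on one side, `fmt.Errorf("...: %w", err)` from the standard library on the other. Both retain the cause, but `%w` does it without a third-party dependency and stays transparent to `errors.Is`/`errors.As`, as this small sketch shows:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("/nonexistent")

	// Standard-library style used on one side of the diff above.
	wrapped := fmt.Errorf("Error parsing image name %q: %w", "example", err)

	// errors.Is still sees the underlying cause through the %w wrap.
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // prints: true

	// The older errors.Wrapf(err, "...") carried the cause too, but required
	// github.com/pkg/errors and its Cause/Unwrap machinery.
}
```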
@@ -3,7 +3,6 @@ package inspect
import (
    "time"

    "github.com/containers/image/v5/types"
    digest "github.com/opencontainers/go-digest"
)

@@ -20,6 +19,5 @@ type Output struct {
    Architecture string
    Os           string
    Layers       []string
    LayersData   []types.ImageInspectLayer
    Env          []string
}
@@ -1,9 +1,9 @@
package main

import (
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "strings"

@@ -13,13 +13,14 @@ import (
    "github.com/containers/image/v5/pkg/blobinfocache"
    "github.com/containers/image/v5/types"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

type layersOptions struct {
    global    *globalOptions
    image     *imageOptions
    retryOpts *retry.Options
    retryOpts *retry.RetryOptions
}

func layersCmd(global *globalOptions) *cobra.Command {
@@ -68,25 +69,25 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
    rawSource types.ImageSource
    src       types.ImageCloser
)
if err = retry.IfNecessary(ctx, func() error {
if err = retry.RetryIfNecessary(ctx, func() error {
    rawSource, err = parseImageSource(ctx, opts.image, imageName)
    return err
}, opts.retryOpts); err != nil {
    return err
}
if err = retry.IfNecessary(ctx, func() error {
if err = retry.RetryIfNecessary(ctx, func() error {
    src, err = image.FromSource(ctx, sys, rawSource)
    return err
}, opts.retryOpts); err != nil {
    if closeErr := rawSource.Close(); closeErr != nil {
        return fmt.Errorf("%w (closing image source: %v)", err, closeErr)
        return errors.Wrapf(err, " (close error: %v)", closeErr)
    }

    return err
}
defer func() {
    if err := src.Close(); err != nil {
        retErr = noteCloseFailure(retErr, "closing image", err)
        retErr = errors.Wrapf(retErr, " (close error: %v)", err)
    }
}()

@@ -121,7 +122,7 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
    }
}

tmpDir, err := os.MkdirTemp(".", "layers-")
tmpDir, err := ioutil.TempDir(".", "layers-")
if err != nil {
    return err
}
@@ -136,7 +137,7 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {

defer func() {
    if err := dest.Close(); err != nil {
        retErr = noteCloseFailure(retErr, "closing destination", err)
        retErr = errors.Wrapf(retErr, " (close error: %v)", err)
    }
}()

@@ -145,7 +146,7 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
    r        io.ReadCloser
    blobSize int64
)
if err = retry.IfNecessary(ctx, func() error {
if err = retry.RetryIfNecessary(ctx, func() error {
    r, blobSize, err = rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1}, cache)
    return err
}, opts.retryOpts); err != nil {
@@ -153,14 +154,14 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
}
if _, err := dest.PutBlob(ctx, r, types.BlobInfo{Digest: bd.digest, Size: blobSize}, cache, bd.isConfig); err != nil {
    if closeErr := r.Close(); closeErr != nil {
        return fmt.Errorf("%w (close error: %v)", err, closeErr)
        return errors.Wrapf(err, " (close error: %v)", closeErr)
    }
    return err
}
}

var manifest []byte
if err = retry.IfNecessary(ctx, func() error {
if err = retry.RetryIfNecessary(ctx, func() error {
    manifest, _, err = src.Manifest(ctx)
    return err
}, opts.retryOpts); err != nil {
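layers.go (and copy.go above, with `os.WriteFile` vs `ioutil.WriteFile`) also tracks the Go 1.16 deprecation of io/ioutil: each helper has a drop-in os equivalent with the same signature. A compact sketch:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Old: tmpDir, err := ioutil.TempDir(".", "layers-")
	// New (Go 1.16+): same signature and behavior, no io/ioutil import.
	tmpDir, err := os.MkdirTemp(".", "layers-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmpDir) // clean up the scratch directory

	// Old: ioutil.WriteFile(...); New: os.WriteFile with identical arguments.
	if err := os.WriteFile(tmpDir+"/digest", []byte("sha256:..."), 0644); err != nil {
		panic(err)
	}
	fmt.Println("wrote scratch file under", tmpDir)
}
```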
@@ -3,46 +3,29 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/common/pkg/retry"
|
||||
"github.com/containers/image/v5/docker"
|
||||
"github.com/containers/image/v5/docker/archive"
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/transports/alltransports"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// tagListOutput is the output format of (skopeo list-tags), primarily so that we can format it with a simple json.MarshalIndent.
|
||||
type tagListOutput struct {
|
||||
Repository string `json:",omitempty"`
|
||||
Repository string
|
||||
Tags []string
|
||||
}
|
||||
|
||||
type tagsOptions struct {
|
||||
global *globalOptions
|
||||
image *imageOptions
|
||||
retryOpts *retry.Options
|
||||
}
|
||||
|
||||
var transportHandlers = map[string]func(ctx context.Context, sys *types.SystemContext, opts *tagsOptions, userInput string) (repositoryName string, tagListing []string, err error){
|
||||
docker.Transport.Name(): listDockerRepoTags,
|
||||
archive.Transport.Name(): listDockerArchiveTags,
|
||||
}
|
||||
|
||||
// supportedTransports returns all the supported transports
|
||||
func supportedTransports(joinStr string) string {
|
||||
res := make([]string, 0, len(transportHandlers))
|
||||
for handlerName := range transportHandlers {
|
||||
res = append(res, handlerName)
|
||||
}
|
||||
sort.Strings(res)
|
||||
return strings.Join(res, joinStr)
|
||||
retryOpts *retry.RetryOptions
|
||||
}
|
||||
|
func tagsCmd(global *globalOptions) *cobra.Command {
@@ -55,14 +38,13 @@ func tagsCmd(global *globalOptions) *cobra.Command {
        image:     imageOpts,
        retryOpts: retryOpts,
    }

    cmd := &cobra.Command{
        Use:   "list-tags [command options] SOURCE-IMAGE",
        Short: "List tags in the transport/repository specified by the SOURCE-IMAGE",
        Long: `Return the list of tags from the transport/repository "SOURCE-IMAGE"
        Use:   "list-tags [command options] REPOSITORY-NAME",
        Short: "List tags in the transport/repository specified by the REPOSITORY-NAME",
        Long: `Return the list of tags from the transport/repository "REPOSITORY-NAME"

Supported transports:
` + supportedTransports(" ") + `
 docker

See skopeo-list-tags(1) section "REPOSITORY NAMES" for the expected format
`,
@@ -81,12 +63,12 @@ See skopeo-list-tags(1) section "REPOSITORY NAMES" for the expected format
// Would really love to not have this, but needed to enforce tag-less and digest-less names
func parseDockerRepositoryReference(refString string) (types.ImageReference, error) {
    if !strings.HasPrefix(refString, docker.Transport.Name()+"://") {
        return nil, fmt.Errorf("docker: image reference %s does not start with %s://", refString, docker.Transport.Name())
        return nil, errors.Errorf("docker: image reference %s does not start with %s://", refString, docker.Transport.Name())
    }

    parts := strings.SplitN(refString, ":", 2)
    if len(parts) != 2 {
        return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, refString)
        return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, refString)
    }

    ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(parts[1], "//"))
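parseDockerRepositoryReference insists on a docker:// prefix and, in the lines cut off above, rejects names that already carry a tag or digest. A short sketch of that final check, reusing the reference helpers already imported in this file (requireRepositoryOnly is a hypothetical name):

    func requireRepositoryOnly(name string) (reference.Named, error) {
        ref, err := reference.ParseNormalizedNamed(name)
        if err != nil {
            return nil, err
        }
        if !reference.IsNameOnly(ref) {
            return nil, fmt.Errorf("%q names a specific image, not a repository", name)
        }
        return ref, nil
    }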
@@ -108,63 +90,11 @@ func listDockerTags(ctx context.Context, sys *types.SystemContext, imgRef types.

    tags, err := docker.GetRepositoryTags(ctx, sys, imgRef)
    if err != nil {
        return ``, nil, fmt.Errorf("Error listing repository tags: %w", err)
        return ``, nil, fmt.Errorf("Error listing repository tags: %v", err)
    }
    return repositoryName, tags, nil
}

// return the tagLists from a docker repo
func listDockerRepoTags(ctx context.Context, sys *types.SystemContext, opts *tagsOptions, userInput string) (repositoryName string, tagListing []string, err error) {
    // Do transport-specific parsing and validation to get an image reference
    imgRef, err := parseDockerRepositoryReference(userInput)
    if err != nil {
        return
    }
    if err = retry.IfNecessary(ctx, func() error {
        repositoryName, tagListing, err = listDockerTags(ctx, sys, imgRef)
        return err
    }, opts.retryOpts); err != nil {
        return
    }
    return
}

// return the tagLists from a docker archive file
func listDockerArchiveTags(ctx context.Context, sys *types.SystemContext, opts *tagsOptions, userInput string) (repositoryName string, tagListing []string, err error) {
    ref, err := alltransports.ParseImageName(userInput)
    if err != nil {
        return
    }

    tarReader, _, err := archive.NewReaderForReference(sys, ref)
    if err != nil {
        return
    }
    defer tarReader.Close()

    imageRefs, err := tarReader.List()
    if err != nil {
        return
    }

    var repoTags []string
    for imageIndex, items := range imageRefs {
        for _, ref := range items {
            repoTags, err = tarReader.ManifestTagsForReference(ref)
            if err != nil {
                return
            }
            // handle for each untagged image
            if len(repoTags) == 0 {
                repoTags = []string{fmt.Sprintf("@%d", imageIndex)}
            }
            tagListing = append(tagListing, repoTags...)
        }
    }

    return
}

func (opts *tagsOptions) run(args []string, stdout io.Writer) (retErr error) {
    ctx, cancel := opts.global.commandTimeoutContext()
    defer cancel()
@@ -183,17 +113,23 @@ func (opts *tagsOptions) run(args []string, stdout io.Writer) (retErr error) {
        return fmt.Errorf("Invalid %q: does not specify a transport", args[0])
    }

    if transport.Name() != docker.Transport.Name() {
        return fmt.Errorf("Unsupported transport '%v' for tag listing. Only '%v' currently supported", transport.Name(), docker.Transport.Name())
    }

    // Do transport-specific parsing and validation to get an image reference
    imgRef, err := parseDockerRepositoryReference(args[0])
    if err != nil {
        return err
    }

    var repositoryName string
    var tagListing []string

    if val, ok := transportHandlers[transport.Name()]; ok {
        repositoryName, tagListing, err = val(ctx, sys, opts, args[0])
        if err != nil {
            return err
        }
    } else {
        return fmt.Errorf("Unsupported transport '%s' for tag listing. Only supported: %s",
            transport.Name(), supportedTransports(", "))
    if err = retry.RetryIfNecessary(ctx, func() error {
        repositoryName, tagListing, err = listDockerTags(ctx, sys, imgRef)
        return err
    }, opts.retryOpts); err != nil {
        return err
    }

    outputData := tagListOutput{
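The hunk stops just as run() builds its output value; per the comment on tagListOutput, the remainder presumably just pretty-prints it. A sketch of that tail (the indent string and trailing newline are assumptions):

    outputData := tagListOutput{
        Repository: repositoryName,
        Tags:       tagListing,
    }
    out, err := json.MarshalIndent(outputData, "", "    ")
    if err != nil {
        return err
    }
    fmt.Fprintf(stdout, "%s\n", string(out))
    return nil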
@@ -63,8 +63,11 @@ func createApp() (*cobra.Command, *globalOptions) {
        },
        SilenceUsage:  true,
        SilenceErrors: true,
        // Hide the completion command which is provided by cobra
        CompletionOptions: cobra.CompletionOptions{HiddenDefaultCmd: true},
        // Currently, skopeo uses manually written completions. Cobra allows
        // for auto-generating completions for various shells. Podman is
        // already making use of that. If Skopeo decides to follow, please
        // remove the line below (and hide the `completion` command).
        CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
        // This is documented to parse "local" (non-PersistentFlags) flags of parent commands before
        // running subcommands and handling their options. We don't really run into such cases,
        // because all of our flags on rootCommand are in PersistentFlags, except for the deprecated --tls-verify;
@@ -4,7 +4,7 @@ import (
    "errors"
    "fmt"
    "io"
    "os"
    "io/ioutil"

    "github.com/containers/image/v5/manifest"
    "github.com/spf13/cobra"
@@ -31,7 +31,7 @@ func (opts *manifestDigestOptions) run(args []string, stdout io.Writer) error {
    }
    manifestPath := args[0]

    man, err := os.ReadFile(manifestPath)
    man, err := ioutil.ReadFile(manifestPath)
    if err != nil {
        return fmt.Errorf("Error reading manifest from %s: %v", manifestPath, err)
    }
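The hunk ends before the digest is actually computed; a sketch of what plausibly follows, feeding the manifest bytes into c/image's manifest.Digest (the output formatting is an assumption):

    digest, err := manifest.Digest(man)
    if err != nil {
        return fmt.Errorf("Error computing digest: %v", err)
    }
    fmt.Fprintf(stdout, "%s\n", digest)
    return nil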
@@ -98,7 +98,7 @@ const maxMsgSize = 32 * 1024
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER
// We hard error if the input JSON numbers we expect to be
// integers are above this.
const maxJSONFloat = float64(uint64(1)<<53 - 1)
const maxJSONFloat = float64(1<<53 - 1)
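Both constant spellings evaluate to 2^53 - 1, the largest integer a float64 can represent exactly, which is why JSON numbers above it cannot safely stand in for integers. A sketch of the kind of guard such a constant enables (toUint64 is a hypothetical helper, not part of proxy.go):

    import (
        "fmt"
        "math"
    )

    // toUint64 rejects JSON numbers that are not exact, in-range integers.
    func toUint64(v float64) (uint64, error) {
        if v < 0 || v > maxJSONFloat || v != math.Trunc(v) {
            return 0, fmt.Errorf("value %v is not a safe JSON integer", v)
        }
        return uint64(v), nil
    }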
// request is the JSON serialization of a function call
type request struct {
@@ -664,14 +664,7 @@ func proxyCmd(global *globalOptions) *cobra.Command {
// processRequest dispatches a remote request.
// replyBuf is the result of the invocation.
// terminate should be true if processing of requests should halt.
func (h *proxyHandler) processRequest(readBytes []byte) (rb replyBuf, terminate bool, err error) {
    var req request

    // Parse the request JSON
    if err = json.Unmarshal(readBytes, &req); err != nil {
        err = fmt.Errorf("invalid request: %v", err)
        return
    }
func (h *proxyHandler) processRequest(req request) (rb replyBuf, terminate bool, err error) {
    // Dispatch on the method
    switch req.Method {
    case "Initialize":
@@ -724,15 +717,18 @@ func (opts *proxyOptions) run(args []string, stdout io.Writer) error {
        }
        return fmt.Errorf("reading socket: %v", err)
    }
    // Parse the request JSON
    readbuf := buf[0:n]
    var req request
    if err := json.Unmarshal(readbuf, &req); err != nil {
        rb := replyBuf{}
        rb.send(conn, fmt.Errorf("invalid request: %v", err))
    }

    rb, terminate, err := handler.processRequest(readbuf)
    rb, terminate, err := handler.processRequest(req)
    if terminate {
        return nil
    }

    if err := rb.send(conn, err); err != nil {
        return fmt.Errorf("writing to socket: %w", err)
    }
    rb.send(conn, err)
    }
}
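The refactor pulls JSON parsing out of processRequest and into the read loop, so a malformed message can be answered at the protocol layer before anything is dispatched. As excerpted, the unmarshal-error branch sends a reply but no continue or return is visible; that may simply be context the hunk omits. The implied loop shape, with the assumed control flow marked:

    readbuf := buf[0:n]
    var req request
    if err := json.Unmarshal(readbuf, &req); err != nil {
        rb := replyBuf{}
        rb.send(conn, fmt.Errorf("invalid request: %v", err))
        continue // assumption: skip dispatch and wait for the next message
    }
    rb, terminate, err := handler.processRequest(req)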
@@ -5,7 +5,7 @@ import (
    "errors"
    "fmt"
    "io"
    "os"
    "io/ioutil"

    "github.com/containers/image/v5/pkg/cli"
    "github.com/containers/image/v5/signature"
@@ -39,7 +39,7 @@ func (opts *standaloneSignOptions) run(args []string, stdout io.Writer) error {
    dockerReference := args[1]
    fingerprint := args[2]

    manifest, err := os.ReadFile(manifestPath)
    manifest, err := ioutil.ReadFile(manifestPath)
    if err != nil {
        return fmt.Errorf("Error reading %s: %v", manifestPath, err)
    }
@@ -60,7 +60,7 @@ func (opts *standaloneSignOptions) run(args []string, stdout io.Writer) error {
        return fmt.Errorf("Error creating signature: %v", err)
    }

    if err := os.WriteFile(opts.output, signature, 0644); err != nil {
    if err := ioutil.WriteFile(opts.output, signature, 0644); err != nil {
        return fmt.Errorf("Error writing signature to %s: %v", opts.output, err)
    }
    return nil
@@ -89,11 +89,11 @@ func (opts *standaloneVerifyOptions) run(args []string, stdout io.Writer) error
    expectedFingerprint := args[2]
    signaturePath := args[3]

    unverifiedManifest, err := os.ReadFile(manifestPath)
    unverifiedManifest, err := ioutil.ReadFile(manifestPath)
    if err != nil {
        return fmt.Errorf("Error reading manifest from %s: %v", manifestPath, err)
    }
    unverifiedSignature, err := os.ReadFile(signaturePath)
    unverifiedSignature, err := ioutil.ReadFile(signaturePath)
    if err != nil {
        return fmt.Errorf("Error reading signature from %s: %v", signaturePath, err)
    }
@@ -139,7 +139,7 @@ func (opts *untrustedSignatureDumpOptions) run(args []string, stdout io.Writer)
    }
    untrustedSignaturePath := args[0]

    untrustedSignature, err := os.ReadFile(untrustedSignaturePath)
    untrustedSignature, err := ioutil.ReadFile(untrustedSignaturePath)
    if err != nil {
        return fmt.Errorf("Error reading untrusted signature from %s: %v", untrustedSignaturePath, err)
    }
@@ -2,6 +2,7 @@ package main

import (
    "encoding/json"
    "io/ioutil"
    "os"
    "testing"
    "time"
@@ -24,8 +25,9 @@ const (
// Test that results of runSkopeo failed with nothing on stdout, and substring
// within the error message.
func assertTestFailed(t *testing.T, stdout string, err error, substring string) {
    assert.ErrorContains(t, err, substring)
    assert.Error(t, err)
    assert.Empty(t, stdout)
    assert.Contains(t, err.Error(), substring)
}

func TestStandaloneSign(t *testing.T) {
@@ -38,7 +40,8 @@ func TestStandaloneSign(t *testing.T) {

    manifestPath := "fixtures/image.manifest.json"
    dockerReference := "testing/manifest"
    t.Setenv("GNUPGHOME", "fixtures")
    os.Setenv("GNUPGHOME", "fixtures")
    defer os.Unsetenv("GNUPGHOME")

    // Invalid command-line arguments
    for _, args := range [][]string{
@@ -75,7 +78,7 @@ func TestStandaloneSign(t *testing.T) {
    assertTestFailed(t, out, err, "/dev/full")

    // Success
    sigOutput, err := os.CreateTemp("", "sig")
    sigOutput, err := ioutil.TempFile("", "sig")
    require.NoError(t, err)
    defer os.Remove(sigOutput.Name())
    out, err = runSkopeo("standalone-sign", "-o", sigOutput.Name(),
@@ -83,9 +86,9 @@ func TestStandaloneSign(t *testing.T) {
    require.NoError(t, err)
    assert.Empty(t, out)

    sig, err := os.ReadFile(sigOutput.Name())
    sig, err := ioutil.ReadFile(sigOutput.Name())
    require.NoError(t, err)
    manifest, err := os.ReadFile(manifestPath)
    manifest, err := ioutil.ReadFile(manifestPath)
    require.NoError(t, err)
    mech, err = signature.NewGPGSigningMechanism()
    require.NoError(t, err)
@@ -100,7 +103,8 @@ func TestStandaloneVerify(t *testing.T) {
    manifestPath := "fixtures/image.manifest.json"
    signaturePath := "fixtures/image.signature"
    dockerReference := "testing/manifest"
    t.Setenv("GNUPGHOME", "fixtures")
    os.Setenv("GNUPGHOME", "fixtures")
    defer os.Unsetenv("GNUPGHOME")

    // Invalid command-line arguments
    for _, args := range [][]string{
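All of the test changes above point the same way: t.Setenv, os.CreateTemp, os.ReadFile, and assert.ErrorContains need Go 1.17/1.16 and a newer testify than this branch targets, so each is swapped for its older equivalent. One behavioral difference is worth noting (a sketch, not part of the diff):

    // Go 1.17+: t.Setenv saves and restores any previous value automatically.
    //     t.Setenv("GNUPGHOME", "fixtures")
    // The pre-1.17 pattern used above only unsets on exit, so a value of
    // GNUPGHOME that existed before the test is lost rather than restored:
    os.Setenv("GNUPGHOME", "fixtures")
    defer os.Unsetenv("GNUPGHOME")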
@@ -2,10 +2,9 @@ package main

import (
    "context"
    "errors"
    "fmt"
    "io"
    "io/fs"
    "io/ioutil"
    "os"
    "path"
    "path/filepath"
@@ -22,6 +21,7 @@ import (
    "github.com/containers/image/v5/transports"
    "github.com/containers/image/v5/types"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
    "gopkg.in/yaml.v2"
@@ -29,23 +29,21 @@ import (

// syncOptions contains information retrieved from the skopeo sync command line.
type syncOptions struct {
    global                   *globalOptions // Global (not command dependent) skopeo options
    deprecatedTLSVerify      *deprecatedTLSVerifyOption
    srcImage                 *imageOptions     // Source image options
    destImage                *imageDestOptions // Destination image options
    retryOpts                *retry.Options
    removeSignatures         bool   // Do not copy signatures from the source image
    signByFingerprint        string // Sign the image using a GPG key with the specified fingerprint
    signBySigstorePrivateKey string // Sign the image using a sigstore private key
    signPassphraseFile       string // Path pointing to a passphrase file when signing
    format                   commonFlag.OptionalString // Force conversion of the image to a specified format
    source                   string // Source repository name
    destination              string // Destination registry name
    scoped                   bool   // When true, namespace copied images at destination using the source repository name
    all                      bool   // Copy all of the images if an image in the source is a list
    dryRun                   bool   // Don't actually copy anything, just output what it would have done
    preserveDigests          bool   // Preserve digests during sync
    keepGoing                bool   // Whether or not to abort the sync if there are any errors during syncing the images
    global              *globalOptions // Global (not command dependent) skopeo options
    deprecatedTLSVerify *deprecatedTLSVerifyOption
    srcImage            *imageOptions     // Source image options
    destImage           *imageDestOptions // Destination image options
    retryOpts           *retry.RetryOptions
    removeSignatures    bool   // Do not copy signatures from the source image
    signByFingerprint   string // Sign the image using a GPG key with the specified fingerprint
    signPassphraseFile  string // Path pointing to a passphrase file when signing
    format              commonFlag.OptionalString // Force conversion of the image to a specified format
    source              string // Source repository name
    destination         string // Destination registry name
    scoped              bool   // When true, namespace copied images at destination using the source repository name
    all                 bool   // Copy all of the images if an image in the source is a list
    preserveDigests     bool   // Preserve digests during sync
    keepGoing           bool   // Whether or not to abort the sync if there are any errors during syncing the images
}

// repoDescriptor contains information of a single repository used as a sync source.
@@ -106,14 +104,12 @@ See skopeo-sync(1) for details.
    flags := cmd.Flags()
    flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE images")
    flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
    flags.StringVar(&opts.signBySigstorePrivateKey, "sign-by-sigstore-private-key", "", "Sign the image using a sigstore private key at `PATH`")
    flags.StringVar(&opts.signPassphraseFile, "sign-passphrase-file", "", "File that contains a passphrase for the --sign-by key")
    flags.VarP(commonFlag.NewOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use when syncing image(s) to a destination (default is manifest type of source, with fallbacks)`)
    flags.StringVarP(&opts.source, "src", "s", "", "SOURCE transport type")
    flags.StringVarP(&opts.destination, "dest", "d", "", "DESTINATION transport type")
    flags.BoolVar(&opts.scoped, "scoped", false, "Images at DESTINATION are prefix using the full source image path as scope")
    flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
    flags.BoolVar(&opts.dryRun, "dry-run", false, "Run without actually copying data")
    flags.BoolVar(&opts.preserveDigests, "preserve-digests", false, "Preserve digests of images and lists")
    flags.BoolVarP(&opts.keepGoing, "keep-going", "", false, "Do not abort the sync if any image copy fails")
    flags.AddFlagSet(&sharedFlags)
@@ -142,13 +138,13 @@ func (tls *tlsVerifyConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
// It returns a new unmarshaled sourceConfig object and any error encountered.
func newSourceConfig(yamlFile string) (sourceConfig, error) {
    var cfg sourceConfig
    source, err := os.ReadFile(yamlFile)
    source, err := ioutil.ReadFile(yamlFile)
    if err != nil {
        return cfg, err
    }
    err = yaml.Unmarshal(source, &cfg)
    if err != nil {
        return cfg, fmt.Errorf("Failed to unmarshal %q: %w", yamlFile, err)
        return cfg, errors.Wrapf(err, "Failed to unmarshal %q", yamlFile)
    }
    return cfg, nil
}
@@ -160,7 +156,7 @@ func parseRepositoryReference(input string) (reference.Named, error) {
        return nil, err
    }
    if !reference.IsNameOnly(ref) {
        return nil, errors.New("input names a reference, not a repository")
        return nil, errors.Errorf("input names a reference, not a repository")
    }
    return ref, nil
}
@@ -178,24 +174,24 @@ func destinationReference(destination string, transport string) (types.ImageRefe
    case directory.Transport.Name():
        _, err := os.Stat(destination)
        if err == nil {
            return nil, fmt.Errorf("Refusing to overwrite destination directory %q", destination)
            return nil, errors.Errorf("Refusing to overwrite destination directory %q", destination)
        }
        if !os.IsNotExist(err) {
            return nil, fmt.Errorf("Destination directory could not be used: %w", err)
            return nil, errors.Wrap(err, "Destination directory could not be used")
        }
        // the directory holding the image must be created here
        if err = os.MkdirAll(destination, 0755); err != nil {
            return nil, fmt.Errorf("Error creating directory for image %s: %w", destination, err)
            return nil, errors.Wrapf(err, "Error creating directory for image %s", destination)
        }
        imageTransport = directory.Transport
    default:
        return nil, fmt.Errorf("%q is not a valid destination transport", transport)
        return nil, errors.Errorf("%q is not a valid destination transport", transport)
    }
    logrus.Debugf("Destination for transport %q: %s", transport, destination)

    destRef, err := imageTransport.ParseReference(destination)
    if err != nil {
        return nil, fmt.Errorf("Cannot obtain a valid image reference for transport %q and reference %q: %w", imageTransport.Name(), destination, err)
        return nil, errors.Wrapf(err, "Cannot obtain a valid image reference for transport %q and reference %q", imageTransport.Name(), destination)
    }

    return destRef, nil
@@ -215,16 +211,16 @@ func getImageTags(ctx context.Context, sysCtx *types.SystemContext, repoRef refe
        return nil, err // Should never happen for a reference with tag and no digest
    }
    tags, err := docker.GetRepositoryTags(ctx, sysCtx, dockerRef)
    if err != nil {
        var unauthorizedForCredentials docker.ErrUnauthorizedForCredentials
        if errors.As(err, &unauthorizedForCredentials) {
            // Some registries may decide to block the "list all tags" endpoint.
            // Gracefully allow the sync to continue in this case.
            logrus.Warnf("Registry disallows tag list retrieval: %s", err)
            tags = nil
        } else {
            return nil, fmt.Errorf("Error determining repository tags for image %s: %w", name, err)
        }

    switch err := err.(type) {
    case nil:
        break
    case docker.ErrUnauthorizedForCredentials:
        // Some registries may decide to block the "list all tags" endpoint.
        // Gracefully allow the sync to continue in this case.
        logrus.Warnf("Registry disallows tag list retrieval: %s", err)
    default:
        return tags, errors.Wrapf(err, "Error determining repository tags for image %s", name)
    }

    return tags, nil
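Both variants of getImageTags tolerate registries that disable the tag-listing endpoint; the newer side matches the concrete docker.ErrUnauthorizedForCredentials with errors.As (which also sees through wrapped errors), the older side with a type switch. A compact sketch of the distinction, assuming the standard library errors package:

    tags, err := docker.GetRepositoryTags(ctx, sysCtx, dockerRef)
    var unauthorized docker.ErrUnauthorizedForCredentials
    switch {
    case err == nil:
        // use tags
    case errors.As(err, &unauthorized):
        // also matches err wrapped via fmt.Errorf("...: %w", err);
        // a plain type switch on err would miss the wrapped form
        tags = nil
    default:
        return nil, err
    }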
@@ -244,15 +240,11 @@ func imagesToCopyFromRepo(sys *types.SystemContext, repoRef reference.Named) ([]
    for _, tag := range tags {
        taggedRef, err := reference.WithTag(repoRef, tag)
        if err != nil {
            logrus.WithFields(logrus.Fields{
                "repo": repoRef.Name(),
                "tag":  tag,
            }).Errorf("Error creating a tagged reference from registry tag list: %v", err)
            continue
            return nil, errors.Wrapf(err, "Error creating a reference for repository %s and tag %q", repoRef.Name(), tag)
        }
        ref, err := docker.NewReference(taggedRef)
        if err != nil {
            return nil, fmt.Errorf("Cannot obtain a valid image reference for transport %q and reference %s: %w", docker.Transport.Name(), taggedRef.String(), err)
            return nil, errors.Wrapf(err, "Cannot obtain a valid image reference for transport %q and reference %s", docker.Transport.Name(), taggedRef.String())
        }
        sourceReferences = append(sourceReferences, ref)
    }
@@ -265,15 +257,15 @@ func imagesToCopyFromRepo(sys *types.SystemContext, repoRef reference.Named) ([]
// and any error encountered.
func imagesToCopyFromDir(dirPath string) ([]types.ImageReference, error) {
    var sourceReferences []types.ImageReference
    err := filepath.WalkDir(dirPath, func(path string, d fs.DirEntry, err error) error {
    err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error {
        if err != nil {
            return err
        }
        if !d.IsDir() && d.Name() == "manifest.json" {
        if !info.IsDir() && info.Name() == "manifest.json" {
            dirname := filepath.Dir(path)
            ref, err := directory.Transport.ParseReference(dirname)
            if err != nil {
                return fmt.Errorf("Cannot obtain a valid image reference for transport %q and reference %q: %w", directory.Transport.Name(), dirname, err)
                return errors.Wrapf(err, "Cannot obtain a valid image reference for transport %q and reference %q", directory.Transport.Name(), dirname)
            }
            sourceReferences = append(sourceReferences, ref)
            return filepath.SkipDir
@@ -283,7 +275,7 @@ func imagesToCopyFromDir(dirPath string) ([]types.ImageReference, error) {

    if err != nil {
        return sourceReferences,
            fmt.Errorf("Error walking the path %q: %w", dirPath, err)
            errors.Wrapf(err, "Error walking the path %q", dirPath)
    }

    return sourceReferences, nil
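The walker swap is another Go-version downgrade: filepath.WalkDir and fs.DirEntry arrived in Go 1.16, and WalkDir avoids calling os.Lstat on every entry. The two callbacks here do the same work; only the signatures differ, as sketched (reusing this file's imports):

    func walkBothWays(root string) {
        // Go 1.16+ (removed): the callback receives a lazy fs.DirEntry
        _ = filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error { return err })
        // pre-1.16 (added): the callback receives a fully populated os.FileInfo
        _ = filepath.Walk(root, func(path string, info os.FileInfo, err error) error { return err })
    }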
@@ -441,7 +433,7 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContex
    }
    named, err := reference.ParseNormalizedNamed(source) // May be a repository or an image.
    if err != nil {
        return nil, fmt.Errorf("Cannot obtain a valid image reference for transport %q and reference %q: %w", docker.Transport.Name(), source, err)
        return nil, errors.Wrapf(err, "Cannot obtain a valid image reference for transport %q and reference %q", docker.Transport.Name(), source)
    }
    imageTagged := !reference.IsNameOnly(named)
    logrus.WithFields(logrus.Fields{
@@ -451,7 +443,7 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContex
    if imageTagged {
        srcRef, err := docker.NewReference(named)
        if err != nil {
            return nil, fmt.Errorf("Cannot obtain a valid image reference for transport %q and reference %q: %w", docker.Transport.Name(), named.String(), err)
            return nil, errors.Wrapf(err, "Cannot obtain a valid image reference for transport %q and reference %q", docker.Transport.Name(), named.String())
        }
        desc.ImageRefs = []types.ImageReference{srcRef}
    } else {
@@ -460,7 +452,7 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContex
        return descriptors, err
    }
    if len(desc.ImageRefs) == 0 {
        return descriptors, fmt.Errorf("No images to sync found in %q", source)
        return descriptors, errors.Errorf("No images to sync found in %q", source)
    }
    }
    descriptors = append(descriptors, desc)
@@ -471,7 +463,7 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContex
    }

    if _, err := os.Stat(source); err != nil {
        return descriptors, fmt.Errorf("Invalid source directory specified: %w", err)
        return descriptors, errors.Wrap(err, "Invalid source directory specified")
    }
    desc.DirBasePath = source
    var err error
@@ -480,7 +472,7 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContex
        return descriptors, err
    }
    if len(desc.ImageRefs) == 0 {
        return descriptors, fmt.Errorf("No images to sync found in %q", source)
        return descriptors, errors.Errorf("No images to sync found in %q", source)
    }
    descriptors = append(descriptors, desc)

@@ -499,7 +491,7 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContex

    descs, err := imagesToCopyFromRegistry(registryName, registryConfig, *sourceCtx)
    if err != nil {
        return descriptors, fmt.Errorf("Failed to retrieve list of images from registry %q: %w", registryName, err)
        return descriptors, errors.Wrapf(err, "Failed to retrieve list of images from registry %q", registryName)
    }
    descriptors = append(descriptors, descs...)
    }
@@ -508,7 +500,7 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContex
    return descriptors, nil
}

func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
func (opts *syncOptions) run(args []string, stdout io.Writer) error {
    if len(args) != 2 {
        return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
    }
@@ -516,13 +508,9 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {

    policyContext, err := opts.global.getPolicyContext()
    if err != nil {
        return fmt.Errorf("Error loading trust policy: %w", err)
        return errors.Wrapf(err, "Error loading trust policy")
    }
    defer func() {
        if err := policyContext.Destroy(); err != nil {
            retErr = noteCloseFailure(retErr, "tearing down policy context", err)
        }
    }()
    defer policyContext.Destroy()

    // validate source and destination options
    contains := func(val string, list []string) (_ bool) {
@@ -538,22 +526,20 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
        return errors.New("A source transport must be specified")
    }
    if !contains(opts.source, []string{docker.Transport.Name(), directory.Transport.Name(), "yaml"}) {
        return fmt.Errorf("%q is not a valid source transport", opts.source)
        return errors.Errorf("%q is not a valid source transport", opts.source)
    }

    if len(opts.destination) == 0 {
        return errors.New("A destination transport must be specified")
    }
    if !contains(opts.destination, []string{docker.Transport.Name(), directory.Transport.Name()}) {
        return fmt.Errorf("%q is not a valid destination transport", opts.destination)
        return errors.Errorf("%q is not a valid destination transport", opts.destination)
    }

    if opts.source == opts.destination && opts.source == directory.Transport.Name() {
        return errors.New("sync from 'dir' to 'dir' not implemented, consider using rsync instead")
    }

    opts.destImage.warnAboutIneffectiveOptions(transports.Get(opts.destination))

    imageListSelection := copy.CopySystemImage
    if opts.all {
        imageListSelection = copy.CopyAllImages
@@ -577,7 +563,7 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {

    sourceArg := args[0]
    var srcRepoList []repoDescriptor
    if err = retry.IfNecessary(ctx, func() error {
    if err = retry.RetryIfNecessary(ctx, func() error {
        srcRepoList, err = imagesToCopy(sourceArg, opts.source, sourceCtx)
        return err
    }, opts.retryOpts); err != nil {
@@ -590,32 +576,14 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
        return err
    }

    // c/image/copy.Image does allow creating both simple signing and sigstore signatures simultaneously,
    // with independent passphrases, but that would make the CLI probably too confusing.
    // For now, use the passphrase with either, but only one of them.
    if opts.signPassphraseFile != "" && opts.signByFingerprint != "" && opts.signBySigstorePrivateKey != "" {
        return fmt.Errorf("Only one of --sign-by and sign-by-sigstore-private-key can be used with sign-passphrase-file")
    }
    var passphrase string
    if opts.signPassphraseFile != "" {
        p, err := cli.ReadPassphraseFile(opts.signPassphraseFile)
        if err != nil {
            return err
        }
        passphrase = p
    } else if opts.signBySigstorePrivateKey != "" {
        p, err := promptForPassphrase(opts.signBySigstorePrivateKey, os.Stdin, os.Stdout)
        if err != nil {
            return err
        }
        passphrase = p
    passphrase, err := cli.ReadPassphraseFile(opts.signPassphraseFile)
    if err != nil {
        return err
    }
    options := copy.Options{
        RemoveSignatures: opts.removeSignatures,
        SignBy:           opts.signByFingerprint,
        SignPassphrase:   passphrase,
        SignBySigstorePrivateKeyFile:     opts.signBySigstorePrivateKey,
        SignSigstorePrivateKeyPassphrase: []byte(passphrase),
        ReportWriter:       os.Stdout,
        DestinationCtx:     destinationCtx,
        ImageListSelection: imageListSelection,
@@ -625,10 +593,6 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
    }
    errorsPresent := false
    imagesNumber := 0
    if opts.dryRun {
        logrus.Warn("Running in dry-run mode")
    }

    for _, srcRepo := range srcRepoList {
        options.SourceCtx = srcRepo.Context
        for counter, ref := range srcRepo.ImageRefs {
@@ -655,37 +619,29 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
                return err
            }

            fromToFields := logrus.Fields{
            logrus.WithFields(logrus.Fields{
                "from": transports.ImageName(ref),
                "to":   transports.ImageName(destRef),
            }
            if opts.dryRun {
                logrus.WithFields(fromToFields).Infof("Would have copied image ref %d/%d", counter+1, len(srcRepo.ImageRefs))
            } else {
                logrus.WithFields(fromToFields).Infof("Copying image ref %d/%d", counter+1, len(srcRepo.ImageRefs))
                if err = retry.IfNecessary(ctx, func() error {
                    _, err = copy.Image(ctx, policyContext, destRef, ref, &options)
                    return err
                }, opts.retryOpts); err != nil {
                    if !opts.keepGoing {
                        return fmt.Errorf("Error copying ref %q: %w", transports.ImageName(ref), err)
                    }
                    // log the error, keep a note that there was a failure and move on to the next
                    // image ref
                    errorsPresent = true
                    logrus.WithError(err).Errorf("Error copying ref %q", transports.ImageName(ref))
                    continue
            }).Infof("Copying image ref %d/%d", counter+1, len(srcRepo.ImageRefs))

            if err = retry.RetryIfNecessary(ctx, func() error {
                _, err = copy.Image(ctx, policyContext, destRef, ref, &options)
                return err
            }, opts.retryOpts); err != nil {
                if !opts.keepGoing {
                    return errors.Wrapf(err, "Error copying ref %q", transports.ImageName(ref))
                }
                // log the error, keep a note that there was a failure and move on to the next
                // image ref
                errorsPresent = true
                logrus.WithError(err).Errorf("Error copying ref %q", transports.ImageName(ref))
                continue
            }
            imagesNumber++
        }
    }

    if opts.dryRun {
        logrus.Infof("Would have synced %d images from %d sources", imagesNumber, len(srcRepoList))
    } else {
        logrus.Infof("Synced %d images from %d sources", imagesNumber, len(srcRepoList))
    }
    logrus.Infof("Synced %d images from %d sources", imagesNumber, len(srcRepoList))
    if !errorsPresent {
        return nil
    }
@@ -1,10 +1,9 @@
package main

import (
    "fmt"

    "github.com/containers/image/v5/transports/alltransports"
    "github.com/containers/storage/pkg/unshare"
    "github.com/pkg/errors"
    "github.com/syndtr/gocapability/capability"
)

@@ -23,7 +22,7 @@ func maybeReexec() error {
    // if we already have the capabilities we need.
    capabilities, err := capability.NewPid(0)
    if err != nil {
        return fmt.Errorf("error reading the current capabilities sets: %w", err)
        return errors.Wrapf(err, "error reading the current capabilities sets")
    }
    for _, cap := range neededCapabilities {
        if !capabilities.Get(capability.EFFECTIVE, cap) {
@@ -2,7 +2,6 @@ package main

import (
    "context"
    "errors"
    "fmt"
    "io"
    "os"
@@ -10,16 +9,15 @@ import (

    commonFlag "github.com/containers/common/pkg/flag"
    "github.com/containers/common/pkg/retry"
    "github.com/containers/image/v5/directory"
    "github.com/containers/image/v5/manifest"
    "github.com/containers/image/v5/pkg/compression"
    "github.com/containers/image/v5/transports/alltransports"
    "github.com/containers/image/v5/types"
    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
    "github.com/spf13/pflag"
    "golang.org/x/term"
)

// errorShouldDisplayUsage is a subtype of error used by command handlers to indicate that cli.ShowSubcommandHelp should be called.
@@ -27,27 +25,6 @@ type errorShouldDisplayUsage struct {
    error
}

// noteCloseFailure returns (possibly-nil) err modified to account for (non-nil) closeErr.
// The error for closeErr is annotated with description (which is not a format string)
// Typical usage:
//
//    defer func() {
//        if err := something.Close(); err != nil {
//            returnedErr = noteCloseFailure(returnedErr, "closing something", err)
//        }
//    }
func noteCloseFailure(err error, description string, closeErr error) error {
    // We don’t accept a Closer() and close it ourselves because signature.PolicyContext has .Destroy(), not .Close().
    // This also makes it harder for a caller to do
    // defer noteCloseFailure(returnedErr, …)
    // which doesn’t use the right value of returnedErr, and doesn’t update it.
    if err == nil {
        return fmt.Errorf("%s: %w", description, closeErr)
    }
    // In this case we prioritize the primary error for use with %w; closeErr is usually less relevant, or might be a consequence of the primary error.
    return fmt.Errorf("%w (%s: %v)", err, description, closeErr)
}
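noteCloseFailure only pays off when the enclosing function uses a named return value, so the deferred closure can overwrite the error the caller will see; a minimal sketch matching the doc comment's usage (the file name is illustrative):

    func readSomething(path string) (retErr error) {
        f, err := os.Open(path)
        if err != nil {
            return err
        }
        defer func() {
            if err := f.Close(); err != nil {
                retErr = noteCloseFailure(retErr, "closing "+path, err)
            }
        }()
        // ... use f ...
        return nil
    }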
// commandAction intermediates between the RunE interface and the real handler,
// primarily to ensure that cobra.Command is not available to the handler, which in turn
// makes sure that the cmd.Flags() etc. flag access functions are not used,
@@ -56,9 +33,8 @@ func noteCloseFailure(err error, description string, closeErr error) error {
func commandAction(handler func(args []string, stdout io.Writer) error) func(cmd *cobra.Command, args []string) error {
    return func(c *cobra.Command, args []string) error {
        err := handler(args, c.OutOrStdout())
        var shouldDisplayUsage errorShouldDisplayUsage
        if errors.As(err, &shouldDisplayUsage) {
            return c.Help()
        if _, ok := err.(errorShouldDisplayUsage); ok {
            c.Help()
        }
        return err
    }
@@ -175,8 +151,8 @@ func imageFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLS
    return fs, opts
}

func retryFlags() (pflag.FlagSet, *retry.Options) {
    opts := retry.Options{}
func retryFlags() (pflag.FlagSet, *retry.RetryOptions) {
    opts := retry.RetryOptions{}
    fs := pflag.FlagSet{}
    fs.IntVar(&opts.MaxRetry, "retry-times", 0, "the number of times to possibly retry")
    return fs, &opts
@@ -245,7 +221,6 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
}

// imageDestOptions is a superset of imageOptions specialized for image destinations.
// Every user should call imageDestOptions.warnAboutIneffectiveOptions() as part of handling the CLI
type imageDestOptions struct {
    *imageOptions
    dirForceCompression bool // Compress layers when saving to the dir: transport
@@ -254,13 +229,12 @@ type imageDestOptions struct {
    compressionFormat   string                 // Format to use for the compression
    compressionLevel    commonFlag.OptionalInt // Level to use for the compression
    precomputeDigests   bool                   // Precompute digests to dedup layers when saving to the docker: transport
    imageDestFlagPrefix string
}

// imageDestFlags prepares a collection of CLI flags writing into imageDestOptions, and the managed imageDestOptions structure.
func imageDestFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLSVerify *deprecatedTLSVerifyOption, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageDestOptions) {
    genericFlags, genericOptions := imageFlags(global, shared, deprecatedTLSVerify, flagPrefix, credsOptionAlias)
    opts := imageDestOptions{imageOptions: genericOptions, imageDestFlagPrefix: flagPrefix}
    opts := imageDestOptions{imageOptions: genericOptions}
    fs := pflag.FlagSet{}
    fs.AddFlagSet(&genericFlags)
    fs.BoolVar(&opts.dirForceCompression, flagPrefix+"compress", false, "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)")
@@ -298,19 +272,6 @@ func (opts *imageDestOptions) newSystemContext() (*types.SystemContext, error) {
    return ctx, err
}

// warnAboutIneffectiveOptions warns if any ineffective option was set by the user
// Every user should call this as part of handling the CLI
func (opts *imageDestOptions) warnAboutIneffectiveOptions(destTransport types.ImageTransport) {
    if destTransport.Name() != directory.Transport.Name() {
        if opts.dirForceCompression {
            logrus.Warnf("--%s can only be used if the destination transport is 'dir'", opts.imageDestFlagPrefix+"compress")
        }
        if opts.dirForceDecompression {
            logrus.Warnf("--%s can only be used if the destination transport is 'dir'", opts.imageDestFlagPrefix+"decompress")
        }
    }
}

func parseCreds(creds string) (string, string, error) {
    if creds == "" {
        return "", "", errors.New("credentials can't be empty")
@@ -393,19 +354,3 @@ func adjustUsage(c *cobra.Command) {
    c.SetUsageTemplate(usageTemplate)
    c.DisableFlagsInUseLine = true
}

// promptForPassphrase interactively prompts for a passphrase related to privateKeyFile
func promptForPassphrase(privateKeyFile string, stdin, stdout *os.File) (string, error) {
    stdinFd := int(stdin.Fd())
    if !term.IsTerminal(stdinFd) {
        return "", fmt.Errorf("Cannot prompt for a passphrase for key %s, standard input is not a TTY", privateKeyFile)
    }

    fmt.Fprintf(stdout, "Passphrase for key %s: ", privateKeyFile)
    passphrase, err := term.ReadPassword(stdinFd)
    if err != nil {
        return "", fmt.Errorf("Error reading password: %w", err)
    }
    fmt.Fprintf(stdout, "\n")
    return string(passphrase), nil
}
@@ -1,7 +1,7 @@
package main

import (
    "errors"
    "os"
    "testing"

    "github.com/containers/image/v5/manifest"
@@ -13,27 +13,6 @@ import (
    "github.com/stretchr/testify/require"
)

func TestNoteCloseFailure(t *testing.T) {
    const description = "description"

    mainErr := errors.New("main")
    closeErr := errors.New("closing")

    // Main success, closing failed
    res := noteCloseFailure(nil, description, closeErr)
    require.NotNil(t, res)
    assert.Contains(t, res.Error(), description)
    assert.Contains(t, res.Error(), closeErr.Error())

    // Both main and closing failed
    res = noteCloseFailure(mainErr, description, closeErr)
    require.NotNil(t, res)
    assert.Contains(t, res.Error(), mainErr.Error())
    assert.Contains(t, res.Error(), description)
    assert.Contains(t, res.Error(), closeErr.Error())
    assert.ErrorIs(t, res, mainErr)
}

// fakeGlobalOptions creates globalOptions and sets it according to flags.
func fakeGlobalOptions(t *testing.T, flags []string) (*globalOptions, *cobra.Command) {
    app, opts := createApp()
@@ -149,9 +128,17 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
        DockerRegistryUserAgent: defaultUserAgent,
    }, res)

    oldXRD, hasXRD := os.LookupEnv("REGISTRY_AUTH_FILE")
    defer func() {
        if hasXRD {
            os.Setenv("REGISTRY_AUTH_FILE", oldXRD)
        } else {
            os.Unsetenv("REGISTRY_AUTH_FILE")
        }
    }()
    authFile := "/tmp/auth.json"
    // Make sure when REGISTRY_AUTH_FILE is set the auth file is used
    t.Setenv("REGISTRY_AUTH_FILE", authFile)
    os.Setenv("REGISTRY_AUTH_FILE", authFile)

    // Explicitly set everything to default, except for when the default is “not present”
    opts = fakeImageDestOptions(t, "dest-", true, []string{}, []string{
completions/bash/skopeo (new file, 341 lines)
@@ -0,0 +1,341 @@
#! /bin/bash

_complete_() {
    local options_with_args=$1
    local boolean_options="$2 -h --help"
    local transports=$3

    local option_with_args
    for option_with_args in $options_with_args $transports
    do
        if [ "$option_with_args" == "$prev" ] || [ "$option_with_args" == "$cur" ]
        then
            return
        fi
    done

    case "$cur" in
        -*)
            while IFS='' read -r line; do COMPREPLY+=("$line"); done < <(compgen -W "$boolean_options $options_with_args" -- "$cur")
            ;;
        *)
            if [ -n "$transports" ]
            then
                compopt -o nospace
                while IFS='' read -r line; do COMPREPLY+=("$line"); done < <(compgen -W "$transports" -- "$cur")
            fi
            ;;
    esac
}

_skopeo_supported_transports() {
    local subcommand=$1

    skopeo "$subcommand" --help | grep "Supported transports" -A 1 | tail -n 1 | sed -e 's/,/:/g' -e 's/$/:/'
}

_skopeo_copy() {
    local options_with_args="
    --authfile
    --src-authfile
    --dest-authfile
    --format -f
    --multi-arch
    --sign-by
    --sign-passphrase-file
    --src-creds --screds
    --src-cert-dir
    --src-tls-verify
    --dest-creds --dcreds
    --dest-cert-dir
    --dest-tls-verify
    --src-daemon-host
    --dest-daemon-host
    --src-registry-token
    --dest-registry-token
    --src-username
    --src-password
    --dest-username
    --dest-password
    "

    local boolean_options="
    --all
    --dest-compress
    --dest-decompress
    --remove-signatures
    --src-no-creds
    --dest-no-creds
    --dest-oci-accept-uncompressed-layers
    --dest-precompute-digests
    --preserve-digests
    "

    local transports
    transports="
    $(_skopeo_supported_transports "${FUNCNAME//"_skopeo_"/}")
    "

    _complete_ "$options_with_args" "$boolean_options" "$transports"
}

_skopeo_sync() {
    local options_with_args="
    --authfile
    --dest
    --dest-authfile
    --dest-cert-
    --dest-creds
    --dest-registry-token string
    --format
    --retry-times
    --sign-by
    --sign-passphrase-file
    --src
    --src-authfile
    --src-cert-dir
    --src-creds
    --src-registry-token
    --src-username
    --src-password
    --dest-username
    --dest-password
    "

    local boolean_options="
    --all
    --dest-no-creds
    --dest-tls-verify
    --remove-signatures
    --scoped
    --src-no-creds
    --src-tls-verify
    --keep-going
    --preserve-digests
    "

    local transports
    transports="
    $(_skopeo_supported_transports "${FUNCNAME//"_skopeo_"/}")
    "

    _complete_ "$options_with_args" "$boolean_options" "$transports"
}

_skopeo_inspect() {
    local options_with_args="
    --authfile
    --creds
    --cert-dir
    --format
    --retry-times
    --registry-token
    --username
    --password
    "
    local boolean_options="
    --config
    --raw
    --tls-verify
    --no-creds
    --no-tags -n
    "

    local transports
    transports="
    $(_skopeo_supported_transports "${FUNCNAME//"_skopeo_"/}")
    "

    _complete_ "$options_with_args" "$boolean_options" "$transports"
}

_skopeo_standalone_sign() {
    local options_with_args="
    -o --output
    --passphrase-file
    "
    local boolean_options="
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_standalone_verify() {
    local options_with_args="
    "
    local boolean_options="
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_manifest_digest() {
    local options_with_args="
    "
    local boolean_options="
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_delete() {
    local options_with_args="
    --authfile
    --creds
    --cert-dir
    --registry-token
    --username
    --password
    "
    local boolean_options="
    --tls-verify
    --no-creds
    "

    local transports
    transports="
    $(_skopeo_supported_transports "${FUNCNAME//"_skopeo_"/}")
    "

    _complete_ "$options_with_args" "$boolean_options" "$transports"
}

_skopeo_layers() {
    local options_with_args="
    --authfile
    --creds
    --cert-dir
    --registry-token
    --username
    --password
    "
    local boolean_options="
    --tls-verify
    --no-creds
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_list_repository_tags() {
    local options_with_args="
    --authfile
    --creds
    --cert-dir
    --registry-token
    --username
    --password
    "

    local boolean_options="
    --tls-verify
    --no-creds
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_login() {
    local options_with_args="
    --authfile
    --cert-dir
    --password -p
    --username -u
    "

    local boolean_options="
    --get-login
    --tls-verify
    --password-stdin
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_logout() {
    local options_with_args="
    --authfile
    "

    local boolean_options="
    --all -a
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_skopeo() {
    # XXX: Changes here need to be reflected in the manually expanded
    # string in the `case` statement below as well.
    local options_with_args="
    --policy
    --registries.d
    --override-arch
    --override-os
    --override-variant
    --command-timeout
    --tmpdir
    "
    local boolean_options="
    --insecure-policy
    --debug
    --version -v
    --help -h
    "

    local commands=(
    copy
    delete
    inspect
    list-tags
    login
    logout
    manifest-digest
    standalone-sign
    standalone-verify
    sync
    help
    h
    )

    case "$prev" in
        # XXX: Changes here need to be reflected in $options_with_args as well.
        --policy|--registries.d|--override-arch|--override-os|--override-variant|--command-timeout)
            return
            ;;
    esac

    case "$cur" in
        -*)
            while IFS='' read -r line; do COMPREPLY+=("$line"); done < <(compgen -W "$boolean_options $options_with_args" -- "$cur")
            ;;
        *)
            while IFS='' read -r line; do COMPREPLY+=("$line"); done < <(compgen -W "${commands[*]} help" -- "$cur")
            ;;
    esac
}

_cli_bash_autocomplete() {
    local cur

    COMPREPLY=()
    cur="${COMP_WORDS[COMP_CWORD]}"
    COMPREPLY=()
    local cur prev words cword

    _get_comp_words_by_ref -n : cur prev words cword

    local command="skopeo" cpos=0
    local counter=1
    while [ $counter -lt "$cword" ]; do
        case "${words[$counter]}" in
            skopeo|copy|sync|inspect|delete|manifest-digest|standalone-sign|standalone-verify|help|h|list-repository-tags)
                command="${words[$counter]//-/_}"
                cpos=$counter
                (( cpos++ ))
                break
                ;;
        esac
        (( counter++ ))
    done

    local completions_func=_skopeo_${command}
    declare -F "$completions_func" >/dev/null && $completions_func

    return 0
}

complete -F _cli_bash_autocomplete skopeo
@@ -6,6 +6,17 @@

set -e

_EOL=20270501
if [[ $(date +%Y%m%d) -ge $_EOL ]]; then
    die "As of $_EOL this branch is probably
         no longer supported in RHEL 9.0/8.8, please
         confirm this with RHEL Program Management. If so:
         It should be removed from Cirrus-Cron,
         the .cirrus.yml file removed, and
         the VM images (manually) unmarked
         'permanent=true'"
fi

# BEGIN Global export of all variables
set -a

@@ -55,9 +66,6 @@ _run_setup() {
    # VM's come with the distro. skopeo package pre-installed
    dnf erase -y skopeo

    # Required for testing the SIF transport
    dnf install -y fakeroot squashfs-tools

    msg "Removing systemd-resolved from nsswitch.conf"
    # /etc/resolv.conf is already set to bypass systemd-resolvd
    sed -i -r -e 's/^(hosts.+)resolve.+dns/\1dns/' /etc/nsswitch.conf
@@ -1,16 +1,3 @@
|
||||
[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***)
|
||||
[comment]: <> ()
|
||||
[comment]: <> (ANY changes made to this file, once commited/merged must)
|
||||
[comment]: <> (be manually copy/pasted -in markdown- into the description)
|
||||
[comment]: <> (field on Quay at the following locations:)
|
||||
[comment]: <> ()
|
||||
[comment]: <> (https://quay.io/repository/containers/skopeo)
|
||||
[comment]: <> (https://quay.io/repository/skopeo/stable)
|
||||
[comment]: <> (https://quay.io/repository/skopeo/testing)
|
||||
[comment]: <> (https://quay.io/repository/skopeo/upstream)
|
||||
[comment]: <> ()
|
||||
[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***)
|
||||
|
||||
<img src="https://cdn.rawgit.com/containers/skopeo/master/docs/skopeo.svg" width="250">
|
||||
|
||||
----
|
||||
@@ -19,7 +6,7 @@
|
||||
|
||||
## Overview
|
||||
|
||||
This directory contains the Containerfiles necessary to create the skopeoimage container
|
||||
This directory contains the Dockerfiles necessary to create the skopeoimage container
|
||||
images that are housed on quay.io under the skopeo account. All repositories where
|
||||
the images live are public and can be pulled without credentials. These container images are secured and the
|
||||
resulting containers can run safely with privileges within the container.
|
||||
@@ -31,23 +18,22 @@ default to `/`.
|
||||
|
||||
The container images are:
|
||||
|
||||
* `quay.io/containers/skopeo:v<version>` and `quay.io/skopeo/stable:v<version>` -
|
||||
These images are built daily. These images are intended contain an unchanging
|
||||
and stable version of skopeo. For the most recent `<version>` tags (`vX`,
|
||||
`vX.Y`, and `vX.Y.Z`) the image contents will be updated daily to incorporate
|
||||
(especially) security updates. For build details, please[see the configuration
|
||||
file](stable/Containerfile).
|
||||
* `quay.io/containers/skopeo:<version>` and `quay.io/skopeo/stable:<version>` -
|
||||
These images are built when a new Skopeo version becomes available in
|
||||
Fedora. These images are intended to be unchanging and stable, they will
|
||||
never be updated by automation once they've been pushed. For build details,
|
||||
please [see the configuration file](stable/Dockerfile).
|
||||
* `quay.io/containers/skopeo:latest` and `quay.io/skopeo/stable:latest` -
|
||||
Built daily using the same Containerfile as above. The skopeo version
|
||||
will remain the "latest" available in Fedora, however the other image
|
||||
Built daily using the same Dockerfile as above. The skopeo version
|
||||
will remain the "latest" available in Fedora, however the image
|
||||
contents may vary compared to the version-tagged images.
|
||||
* `quay.io/skopeo/testing:latest` - This image is built daily, using the
|
||||
latest version of Skopeo that was in the Fedora `updates-testing` repository.
|
||||
The image is Built with [the testing Containerfile](testing/Containerfile).
|
||||
The image is Built with [the testing Dockerfile](testing/Dockerfile).
|
||||
* `quay.io/skopeo/upstream:latest` - This image is built daily using the latest
|
||||
code found in this GitHub repository. Due to the image changing frequently,
|
||||
it's not guaranteed to be stable or even executable. The image is built with
|
||||
[the upstream Containerfile](upstream/Containerfile).
|
||||
[the upstream Dockerfile](upstream/Dockerfile).
|
||||
|
||||
|
||||
## Sample Usage
|
||||
|
@@ -1,47 +0,0 @@
# stable/Containerfile
#
# Build a Skopeo container image from the latest
# stable version of Skopeo on the Fedoras Updates System.
# https://bodhi.fedoraproject.org/updates/?search=skopeo
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:latest

# Don't include container-selinux and remove
# directories used by dnf that are just taking
# up space.
# TODO: rpm --setcaps... needed due to Fedora (base) image builds
#       being (maybe still?) affected by
#       https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
RUN dnf -y update && \
    rpm --setcaps shadow-utils 2>/dev/null && \
    dnf -y install skopeo fuse-overlayfs \
        --exclude container-selinux && \
    dnf clean all && \
    rm -rf /var/cache /var/log/dnf* /var/log/yum.*

RUN useradd skopeo && \
    echo skopeo:100000:65536 > /etc/subuid && \
    echo skopeo:100000:65536 > /etc/subgid

# Copy & modify the defaults to provide reference if runtime changes needed.
# Changes here are required for running with fuse-overlay storage inside container.
RUN sed -e 's|^#mount_program|mount_program|g' \
       -e '/additionalimage.*/a "/var/lib/shared",' \
       -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \
       /usr/share/containers/storage.conf \
       > /etc/containers/storage.conf

# Setup the ability to use additional stores
# with this container image.
RUN mkdir -p /var/lib/shared/overlay-images \
             /var/lib/shared/overlay-layers && \
    touch /var/lib/shared/overlay-images/images.lock && \
    touch /var/lib/shared/overlay-layers/layers.lock

# Point to the Authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json

# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]
||||
33 contrib/skopeoimage/stable/Dockerfile Normal file
@@ -0,0 +1,33 @@

# stable/Dockerfile
#
# Build a Skopeo container image from the latest
# stable version of Skopeo on the Fedoras Updates System.
# https://bodhi.fedoraproject.org/updates/?search=skopeo
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:latest

# Don't include container-selinux and remove
# directories used by yum that are just taking
# up space. Also reinstall shadow-utils as without
# doing so, the setuid/setgid bits on newuidmap
# and newgidmap are lost in the Fedora images.
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; yum -y install skopeo fuse-overlayfs --exclude container-selinux; yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*

# Adjust storage.conf to enable Fuse storage.
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf

# Setup the ability to use additional stores
# with this container image.
RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers; touch /var/lib/shared/overlay-images/images.lock; touch /var/lib/shared/overlay-layers/layers.lock

# Setup skopeo's uid/guid entries
RUN echo skopeo:100000:65536 > /etc/subuid
RUN echo skopeo:100000:65536 > /etc/subgid

# Point to the Authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json

# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]
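As a hedged aside on the fuse-overlayfs setup these files configure: the container needs `/dev/fuse` passed through for that storage to actually work. A sketch, with illustrative image names:

```sh
# /dev/fuse must be available inside the container for fuse-overlayfs
podman run --rm --device /dev/fuse quay.io/skopeo/stable:latest \
  copy docker://registry.fedoraproject.org/fedora:latest \
  containers-storage:registry.fedoraproject.org/fedora:latest
```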
@@ -1,49 +0,0 @@

# testing/Containerfile
#
# Build a Skopeo container image from the latest
# version of Skopeo that is in updates-testing
# on the Fedoras Updates System.
# https://bodhi.fedoraproject.org/updates/?search=skopeo
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:latest

# Don't include container-selinux and remove
# directories used by dnf that are just taking
# up space.
# TODO: rpm --setcaps... needed due to Fedora (base) image builds
#       being (maybe still?) affected by
#       https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
RUN dnf -y update && \
    rpm --setcaps shadow-utils 2>/dev/null && \
    dnf -y install skopeo fuse-overlayfs \
        --exclude container-selinux \
        --enablerepo updates-testing && \
    dnf clean all && \
    rm -rf /var/cache /var/log/dnf* /var/log/yum.*

RUN useradd skopeo && \
    echo skopeo:100000:65536 > /etc/subuid && \
    echo skopeo:100000:65536 > /etc/subgid

# Copy & modify the defaults to provide reference if runtime changes needed.
# Changes here are required for running with fuse-overlay storage inside container.
RUN sed -e 's|^#mount_program|mount_program|g' \
        -e '/additionalimage.*/a "/var/lib/shared",' \
        -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \
        /usr/share/containers/storage.conf \
        > /etc/containers/storage.conf

# Setup the ability to use additional stores
# with this container image.
RUN mkdir -p /var/lib/shared/overlay-images \
             /var/lib/shared/overlay-layers && \
    touch /var/lib/shared/overlay-images/images.lock && \
    touch /var/lib/shared/overlay-layers/layers.lock

# Point to the Authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json

# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]
34 contrib/skopeoimage/testing/Dockerfile Normal file
@@ -0,0 +1,34 @@

# testing/Dockerfile
#
# Build a Skopeo container image from the latest
# version of Skopeo that is in updates-testing
# on the Fedoras Updates System.
# https://bodhi.fedoraproject.org/updates/?search=skopeo
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:latest

# Don't include container-selinux and remove
# directories used by yum that are just taking
# up space. Also reinstall shadow-utils as without
# doing so, the setuid/setgid bits on newuidmap
# and newgidmap are lost in the Fedora images.
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; yum -y install skopeo fuse-overlayfs --enablerepo updates-testing --exclude container-selinux; yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*

# Adjust storage.conf to enable Fuse storage.
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf

# Setup the ability to use additional stores
# with this container image.
RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers; touch /var/lib/shared/overlay-images/images.lock; touch /var/lib/shared/overlay-layers/layers.lock

# Setup skopeo's uid/guid entries
RUN echo skopeo:100000:65536 > /etc/subuid
RUN echo skopeo:100000:65536 > /etc/subgid

# Point to the Authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json

# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]
@@ -1,50 +0,0 @@

# upstream/Containerfile
#
# Build a Skopeo container image from the latest
# upstream version of Skopeo on GitHub.
# https://github.com/containers/skopeo
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:latest

# Don't include container-selinux and remove
# directories used by dnf that are just taking
# up space.
# TODO: rpm --setcaps... needed due to Fedora (base) image builds
#       being (maybe still?) affected by
#       https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
RUN dnf -y update && \
    rpm --setcaps shadow-utils 2>/dev/null && \
    dnf -y install 'dnf-command(copr)' --enablerepo=updates-testing && \
    dnf -y copr enable rhcontainerbot/podman-next && \
    dnf -y install skopeo \
        --exclude container-selinux \
        --enablerepo=updates-testing && \
    dnf clean all && \
    rm -rf /var/cache /var/log/dnf* /var/log/yum.*

RUN useradd skopeo && \
    echo skopeo:100000:65536 > /etc/subuid && \
    echo skopeo:100000:65536 > /etc/subgid

# Copy & modify the defaults to provide reference if runtime changes needed.
# Changes here are required for running with fuse-overlay storage inside container.
RUN sed -e 's|^#mount_program|mount_program|g' \
        -e '/additionalimage.*/a "/var/lib/shared",' \
        -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \
        /usr/share/containers/storage.conf \
        > /etc/containers/storage.conf

# Setup the ability to use additional stores
# with this container image.
RUN mkdir -p /var/lib/shared/overlay-images \
             /var/lib/shared/overlay-layers && \
    touch /var/lib/shared/overlay-images/images.lock && \
    touch /var/lib/shared/overlay-layers/layers.lock

# Point to the Authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json

# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]
54 contrib/skopeoimage/upstream/Dockerfile Normal file
@@ -0,0 +1,54 @@

# upstream/Dockerfile
#
# Build a Skopeo container image from the latest
# upstream version of Skopeo on GitHub.
# https://github.com/containers/skopeo
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:latest

# Don't include container-selinux and remove
# directories used by yum that are just taking
# up space. Also reinstall shadow-utils as without
# doing so, the setuid/setgid bits on newuidmap
# and newgidmap are lost in the Fedora images.
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; \
    yum -y install make \
                   golang \
                   git \
                   go-md2man \
                   fuse-overlayfs \
                   fuse3 \
                   containers-common \
                   gpgme-devel \
                   libassuan-devel \
                   btrfs-progs-devel \
                   device-mapper-devel --enablerepo updates-testing --exclude container-selinux; \
    mkdir /root/skopeo; \
    git clone https://github.com/containers/skopeo /root/skopeo/src/github.com/containers/skopeo; \
    export GOPATH=/root/skopeo; \
    cd /root/skopeo/src/github.com/containers/skopeo; \
    make bin/skopeo; \
    make PREFIX=/usr install; \
    rm -rf /root/skopeo/*; \
    yum -y remove git golang go-md2man make; \
    yum -y clean all; yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*

# Adjust storage.conf to enable Fuse storage.
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf

# Setup the ability to use additional stores
# with this container image.
RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers; touch /var/lib/shared/overlay-images/images.lock; touch /var/lib/shared/overlay-layers/layers.lock

# Setup skopeo's uid/guid entries
RUN echo skopeo:100000:65536 > /etc/subuid
RUN echo skopeo:100000:65536 > /etc/subgid

# Point to the Authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json

# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]
20 default.yaml
@@ -1,21 +1,19 @@

# This is a default registries.d configuration file. You may
# add to this file or create additional files in registries.d/.
#
# lookaside: for reading/writing simple signing signatures
# lookaside-staging: for writing simple signing signatures, preferred over lookaside
# sigstore: indicates a location that is read and write
# sigstore-staging: indicates a location that is only for write
#
# lookaside and lookaside-staging take a value of the following:
#  lookaside: {schema}://location
# sigstore and sigstore-staging take a value of the following:
#  sigstore: {schema}://location
#
# For reading signatures, schema may be http, https, or file.
# For writing signatures, schema may only be file.

# The default locations are built-in, for both reading and writing:
#   /var/lib/containers/sigstore for root, or
#   ~/.local/share/containers/sigstore for non-root users.
# This is the default signature write location for docker registries.
default-docker:
#  lookaside: https://…
#  lookaside-staging: file:///…
#  sigstore: file:///var/lib/containers/sigstore
  sigstore-staging: file:///var/lib/containers/sigstore

# The 'docker' indicator here is the start of the configuration
# for docker registries.
@@ -23,6 +21,6 @@ default-docker:
# docker:
#
#   privateregistry.com:
#     lookaside: https://privateregistry.com/sigstore/
#     lookaside-staging: /mnt/nfs/privateregistry/sigstore
#     sigstore: http://privateregistry.com/sigstore/
#     sigstore-staging: /mnt/nfs/privateregistry/sigstore
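As a concrete sketch of the commented-out pattern above, a registries.d entry wiring a hypothetical registry to a lookaside server could look like this (names and URLs are illustrative):

```yaml
docker:
  registry.example.com:
    lookaside: https://lookaside.example.com/sigstore/
    lookaside-staging: file:///var/lib/containers/sigstore
```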
@@ -56,7 +56,7 @@ After copying the image, write the digest of the resulting image to the file.

**--preserve-digests**

Preserve the digests during copying. Fail if the digest cannot be preserved. Consider using `--all` at the same time.
Preserve the digests during copying. Fail if the digest cannot be preserved.

**--encrypt-layer** _ints_

@@ -70,7 +70,7 @@ MANIFEST TYPE (oci, v2s1, or v2s2) to use in the destination (default is manifest type of source, with fallbacks)

Print usage statement

**--multi-arch** _option_
**--multi-arch**

Control what is copied if _source-image_ refers to a multi-architecture image. Default is system.

@@ -89,21 +89,13 @@ Suppress output information when copying images.

Do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.

**--sign-by** _key-id_
**--sign-by**=_key-id_

Add a “simple signing” signature using that key ID for an image name corresponding to _destination-image_
Add a signature using that key ID for an image name corresponding to _destination-image_

**--sign-by-sigstore-private-key** _path_
**--sign-passphrase-file**=_path_

Add a sigstore signature using a private key at _path_ for an image name corresponding to _destination-image_

**--sign-passphrase-file** _path_

The passphrase to use when signing with `--sign-by` or `--sign-by-sigstore-private-key`. Only the first line will be read. A passphrase stored in a file is of questionable security if other users can read this file. Do not use this option if at all avoidable.

**--sign-identity** _reference_

The identity to use when signing the image. The identity must be a fully specified docker reference. If the identity is not specified, the target docker reference will be used.
The passphrase to use when signing with the key ID from `--sign-by`. Only the first line will be read. A passphrase stored in a file is of questionable security if other users can read this file. Do not use this option if at all avoidable.

**--src-shared-blob-dir** _directory_
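To ground the signing options above, a hedged example of signing while copying (the key ID and registries are illustrative):

```sh
# Copy an image and attach a "simple signing" signature with the given GPG key
skopeo copy --sign-by alice@example.com \
  docker://registry.example.com/myimage:latest \
  docker://mirror.example.com/myimage:latest
```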
@@ -6,27 +6,17 @@ skopeo\-delete - Mark the _image-name_ for later deletion by the registry's garbage collector.
## SYNOPSIS
**skopeo delete** [*options*] _image-name_

## DESCRIPTION

Mark _image-name_ for deletion.
The effect of this is registry-specific; many registries don’t support this operation, or don’t allow it in some circumstances / configurations.

**WARNING**: If _image-name_ contains a digest, this affects the referenced manifest, and may delete all tags (within the current repository?) pointing to that manifest.

**WARNING**: If _image-name_ contains a tag (but not a digest), in the current version of Skopeo this resolves the tag into a digest, and then deletes the manifest by digest, as described above (possibly deleting all tags pointing to that manifest, not just the provided tag). This behavior may change in the future.

When using the github.com/distribution/distribution registry server:
To release the allocated disk space, you must log in to the container registry server and execute the container registry garbage collector. E.g.,
Mark _image-name_ for deletion. To release the allocated disk space, you must log in to the container registry server and execute the container registry garbage collector. E.g.,

```
/usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
```

Note: sometimes the config.yml is stored in /etc/docker/registry/config.yml

If you are running the container registry inside of a container, you would execute something like:
```
$ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
```

## OPTIONS
@@ -86,7 +76,7 @@ The password to access the registry.

Mark image example/pause for deletion from the registry.example.com registry:
```sh
$ skopeo delete docker://registry.example.com/example/pause:latest
$ skopeo delete --force docker://registry.example.com/example/pause:latest
```
See above for additional details on using the command **delete**.
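Given the digest warning above, a hedged sketch of the by-digest form (replace the digest placeholder with a real value):

```sh
# Deleting by digest removes the manifest itself, which may untag
# every tag in the repository that points at it
skopeo delete docker://registry.example.com/example/pause@sha256:<digest>
```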
@@ -1,14 +1,14 @@
% skopeo-list-tags(1)

## NAME
skopeo\-list\-tags - List image names in a transport-specific collection of images.
skopeo\-list\-tags - List tags in the transport-specific image repository.

## SYNOPSIS
**skopeo list-tags** [*options*] _source-image_
**skopeo list-tags** [*options*] _repository-name_

Return a list of tags from _source-image_ in a registry or a local docker-archive file.
Return a list of tags from _repository-name_ in a registry.

_source-image_ name of the repository to retrieve a tag listing from or a local docker-archive file.
_repository-name_ name of repository to retrieve tag listing from

## OPTIONS

@@ -53,7 +53,7 @@ The password to access the registry.

## REPOSITORY NAMES

Repository names are transport-specific references as each transport may have its own concept of a "repository" and "tags".
Repository names are transport-specific references as each transport may have its own concept of a "repository" and "tags". Currently, only the Docker transport is supported.

This command refers to repositories using a _transport_`:`_details_ format. The following formats are supported:

@@ -72,8 +72,6 @@ This command refers to repositories using a _transport_`:`_details_ format. The following formats are supported:
  "docker.io/myuser/myimage:v1.0"
  "docker.io/myuser/myimage@sha256:f48c4cc192f4c3c6a069cb5cca6d0a9e34d6076ba7c214fd0cc3ca60e0af76bb"

**docker-archive:path[:docker-reference]**
More than one image can be stored in a docker save-formatted file.

## EXAMPLES

@@ -123,48 +121,8 @@ $ skopeo list-tags docker://localhost:5000/fedora

```

### Docker-archive Transport

To list the tags in a local docker-archive file:

```sh
$ skopeo list-tags docker-archive:/tmp/busybox.tar.gz
{
    "Tags": [
        "busybox:1.28.3"
    ]
}
```

Also supports more than one tag in an archive:

```sh
$ skopeo list-tags docker-archive:/tmp/docker-two-images.tar.gz
{
    "Tags": [
        "example.com/empty:latest",
        "example.com/empty/but:different"
    ]
}
```

Will include a source-index entry for each untagged image:

```sh
$ skopeo list-tags docker-archive:/tmp/four-tags-with-an-untag.tar
{
    "Tags": [
        "image1:tag1",
        "image2:tag2",
        "@2",
        "image4:tag4"
    ]
}
```
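Because the command emits JSON, it composes with `jq` for scripting; a small hedged sketch (assumes `jq` is installed; the registry is illustrative):

```sh
# Print one tag per line, suitable for shell loops
skopeo list-tags docker://registry.fedoraproject.org/fedora | jq -r '.Tags[]'
```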
# SEE ALSO
skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5), containers-transports(1)
skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5)

## AUTHORS
@@ -1,14 +1,14 @@
% skopeo-sync(1)

## NAME
skopeo\-sync - Synchronize images between registry repositories and local directories.
skopeo\-sync - Synchronize images between container registries and local directories.

## SYNOPSIS
**skopeo sync** [*options*] --src _transport_ --dest _transport_ _source_ _destination_

## DESCRIPTION
Synchronize images between registry repositories and local directories.
Synchronize images between container registries and local directories.
The synchronization is achieved by copying all the images found at _source_ to _destination_.

Useful to synchronize a local container registry mirror, and to populate registries running inside of air-gapped environments.

@@ -50,10 +50,6 @@ Path of the authentication file for the source registry. Uses path given by `--authfile`, if not provided.

Path of the authentication file for the destination registry. Uses path given by `--authfile`, if not provided.

**--dry-run**

Run the sync without actually copying data to the destination.

**--src**, **-s** _transport_ Transport for the source repository.

**--dest**, **-d** _transport_ Destination transport.

@@ -66,21 +62,13 @@ Print usage statement.

**--scoped** Prefix images with the source image path, so that multiple images with the same name can be stored at _destination_.

**--preserve-digests** Preserve the digests during copying. Fail if the digest cannot be preserved. Consider using `--all` at the same time.
**--preserve-digests** Preserve the digests during copying. Fail if the digest cannot be preserved.

**--remove-signatures** Do not copy signatures, if any, from _source-image_. This is necessary when copying a signed image to a destination which does not support signatures.

**--sign-by** _key-id_
**--sign-by**=_key-id_ Add a signature using that key ID for an image name corresponding to _destination-image_.

Add a “simple signing” signature using that key ID for an image name corresponding to _destination-image_

**--sign-by-sigstore-private-key** _path_

Add a sigstore signature using a private key at _path_ for an image name corresponding to _destination-image_

**--sign-passphrase-file** _path_

The passphrase to use when signing with `--sign-by` or `--sign-by-sigstore-private-key`. Only the first line will be read. A passphrase stored in a file is of questionable security if other users can read this file. Do not use this option if at all avoidable.
**--sign-passphrase-file**=_path_ The passphrase to use when signing with the key ID from `--sign-by`. Only the first line will be read. A passphrase stored in a file is of questionable security if other users can read this file. Do not use this option if at all avoidable.

**--src-creds** _username[:password]_ for accessing the source registry.
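A hedged sketch of the basic invocation shape (registry and destination path are illustrative):

```sh
# Mirror every tag of a repository into a local directory
skopeo sync --src docker --dest dir registry.example.com/myimage /var/lib/images
```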
@@ -102,13 +102,13 @@ Print the version number
| [skopeo-copy(1)](skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
| [skopeo-delete(1)](skopeo-delete.1.md) | Mark the _image-name_ for later deletion by the registry's garbage collector. |
| [skopeo-inspect(1)](skopeo-inspect.1.md) | Return low-level information about _image-name_ in a registry. |
| [skopeo-list-tags(1)](skopeo-list-tags.1.md) | List image names in a transport-specific collection of images.|
| [skopeo-list-tags(1)](skopeo-list-tags.1.md) | List tags in the transport-specific image repository. |
| [skopeo-login(1)](skopeo-login.1.md) | Login to a container registry. |
| [skopeo-logout(1)](skopeo-logout.1.md) | Logout of a container registry. |
| [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
| [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
| [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md)| Verify an image signature. |
| [skopeo-sync(1)](skopeo-sync.1.md)| Synchronize images between registry repositories and local directories. |
| [skopeo-sync(1)](skopeo-sync.1.md)| Synchronize images between container registries and local directories. |

## FILES
**/etc/containers/policy.json**
598 docs/skopeo.svg
@@ -1,74 +1,546 @@

(SVG markup omitted: this hunk replaces docs/skopeo.svg — the "skopeo-badge-full-vert" logo artwork produced with Inkscape 1.2 — with the earlier skopeo logo drawing produced with Inkscape 0.92, including its gradient definitions and path data.)
|
||||
id="ellipse84292"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84425);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
d="m 183.23633,227.10092 c 5.59753,3.20336 12.36881,4.51528 18.71366,3.17108 1.59516,-0.38 3.17489,-0.99021 4.44874,-2.04739 -0.73893,-0.64617 -1.68301,-0.99544 -2.49844,-1.53493 -3.78032,-2.18293 -7.56064,-4.36587 -11.34096,-6.5488 -3.10767,2.32001 -6.21533,4.64003 -9.323,6.96004 z"
|
||||
id="path84298"
|
||||
inkscape:connector-curvature="0"
|
||||
sodipodi:nodetypes="cccccc" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84479);fill-opacity:1;stroke:none;stroke-width:0.79375005;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
d="m 238.62695,269.97787 0.006,-0.002 0.39453,-0.27735 0.41797,-0.34179 0.002,-0.002 0.45703,-0.42382 0.47851,-0.49219 0.0156,-0.0176 0.47656,-0.53711 0.002,-0.002 0.0117,-0.0137 0.48438,-0.5918 0.0117,-0.0156 0.49023,-0.64257 0.01,-0.0137 0.49609,-0.69726 0.48047,-0.71875 0.01,-0.0137 0.46485,-0.74805 0.004,-0.008 0.002,-0.002 0.30468,-0.51562 0.008,-0.0117 0.4375,-0.78711 0.40625,-0.77734 0.008,-0.0137 0.37109,-0.77149 0.008,-0.0156 0.33789,-0.75977 0.006,-0.0156 0.30078,-0.73829 0.27148,-0.74609 0.21289,-0.66602 0.17969,-0.66796 v -0.002 l 0.12305,-0.58203 0.002,-0.0137 0.0723,-0.51562 0.0176,-0.31836 z"
|
||||
id="path84379"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84408);fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
d="m 202.78906,251.42318 2.08399,1.20118 9.6289,-16.67969 -2.08203,-1.20117 z"
|
||||
id="rect84396"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84441);fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
d="m 169.0918,226.26889 2.35937,1.36133 4.69336,-8.13086 -2.35937,-1.36133 z"
|
||||
id="rect84429"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:url(#linearGradient84455);fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
|
||||
d="m 234.17188,269.53842 2.08203,1.20312 9.63086,-16.67773 -2.08399,-1.20313 z"
|
||||
id="rect84443"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:#ffffff;fill-rule:evenodd;stroke:#f8ead2;stroke-width:0.52916664;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
d="m 215.55025,240.82707 22.49734,12.98884"
|
||||
id="path84521"
|
||||
inkscape:connector-curvature="0" />
|
||||
</g>
|
||||
</svg>
|
||||
|
||||
|
Before Width: | Height: | Size: 14 KiB After Width: | Height: | Size: 24 KiB |
103
go.mod
@@ -3,98 +3,101 @@ module github.com/containers/skopeo

go 1.17

require (
	github.com/containers/common v0.50.1
	github.com/containers/image/v5 v5.23.1-0.20221019201342-d92bac8cb807
	github.com/containers/ocicrypt v1.1.5
	github.com/containers/storage v1.43.0
	github.com/containers/common v0.47.4
	github.com/containers/image/v5 v5.19.1
	github.com/containers/ocicrypt v1.1.2
	github.com/containers/storage v1.38.2
	github.com/docker/docker v20.10.12+incompatible
	github.com/opencontainers/go-digest v1.0.0
	github.com/opencontainers/image-spec v1.1.0-rc1
	github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84
	github.com/opencontainers/image-tools v1.0.0-rc3
	github.com/sirupsen/logrus v1.9.0
	github.com/spf13/cobra v1.5.0
	github.com/pkg/errors v0.9.1
	github.com/sirupsen/logrus v1.8.1
	github.com/spf13/cobra v1.3.0
	github.com/spf13/pflag v1.0.5
	github.com/stretchr/testify v1.8.0
	github.com/stretchr/testify v1.7.0
	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
	golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
	gopkg.in/yaml.v2 v2.4.0
)

require (
	github.com/BurntSushi/toml v1.2.0 // indirect
	github.com/Microsoft/go-winio v0.5.2 // indirect
	github.com/Microsoft/hcsshim v0.9.4 // indirect
	github.com/BurntSushi/toml v1.0.0 // indirect
	github.com/Microsoft/go-winio v0.5.1 // indirect
	github.com/Microsoft/hcsshim v0.9.2 // indirect
	github.com/VividCortex/ewma v1.2.0 // indirect
	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
	github.com/containerd/cgroups v1.0.4 // indirect
	github.com/containerd/stargz-snapshotter/estargz v0.12.0 // indirect
	github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.1.2 // indirect
	github.com/containerd/cgroups v1.0.1 // indirect
	github.com/containerd/containerd v1.5.9 // indirect
	github.com/containerd/stargz-snapshotter/estargz v0.11.0 // indirect
	github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b // indirect
	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/docker/distribution v2.8.1+incompatible // indirect
	github.com/docker/docker v20.10.18+incompatible // indirect
	github.com/docker/docker-credential-helpers v0.7.0 // indirect
	github.com/docker/distribution v2.8.0+incompatible // indirect
	github.com/docker/docker-credential-helpers v0.6.4 // indirect
	github.com/docker/go-connections v0.4.0 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/docker/go-metrics v0.0.1 // indirect
	github.com/docker/go-units v0.4.0 // indirect
	github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
	github.com/ghodss/yaml v1.0.0 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.2 // indirect
	github.com/google/go-containerregistry v0.11.0 // indirect
	github.com/google/go-intervals v0.0.2 // indirect
	github.com/google/uuid v1.3.0 // indirect
	github.com/gorilla/mux v1.8.0 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/hashicorp/errwrap v1.0.0 // indirect
	github.com/hashicorp/go-multierror v1.1.1 // indirect
	github.com/honeycombio/libhoney-go v1.15.8 // indirect
	github.com/imdario/mergo v0.3.13 // indirect
	github.com/imdario/mergo v0.3.12 // indirect
	github.com/inconshreveable/mousetrap v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/klauspost/compress v1.15.11 // indirect
	github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect
	github.com/klauspost/compress v1.14.2 // indirect
	github.com/klauspost/pgzip v1.2.5 // indirect
	github.com/kr/pretty v0.2.1 // indirect
	github.com/kr/text v0.2.0 // indirect
	github.com/letsencrypt/boulder v0.0.0-20220723181115-27de4befb95e // indirect
	github.com/mattn/go-runewidth v0.0.13 // indirect
	github.com/mattn/go-shellwords v1.0.12 // indirect
	github.com/miekg/pkcs11 v1.1.1 // indirect
	github.com/mistifyio/go-zfs/v3 v3.0.0 // indirect
	github.com/moby/sys/mountinfo v0.6.2 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
	github.com/miekg/pkcs11 v1.0.3 // indirect
	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
	github.com/moby/sys/mountinfo v0.5.0 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/opencontainers/runc v1.1.4 // indirect
	github.com/opencontainers/runc v1.1.0 // indirect
	github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
	github.com/opencontainers/selinux v1.10.2 // indirect
	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/opencontainers/selinux v1.10.0 // indirect
	github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/proglottis/gpgme v0.1.3 // indirect
	github.com/proglottis/gpgme v0.1.1 // indirect
	github.com/prometheus/client_golang v1.11.1 // indirect
	github.com/prometheus/client_model v0.2.0 // indirect
	github.com/prometheus/common v0.26.0 // indirect
	github.com/prometheus/procfs v0.6.0 // indirect
	github.com/rivo/uniseg v0.2.0 // indirect
	github.com/russross/blackfriday v2.0.0+incompatible // indirect
	github.com/sigstore/sigstore v1.4.2 // indirect
	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
	github.com/sylabs/sif/v2 v2.8.0 // indirect
	github.com/sylabs/sif/v2 v2.3.1 // indirect
	github.com/tchap/go-patricia v2.3.0+incompatible // indirect
	github.com/theupdateframework/go-tuf v0.5.1 // indirect
	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
	github.com/ulikunitz/xz v0.5.10 // indirect
	github.com/vbatts/tar-split v0.11.2 // indirect
	github.com/vbauerster/mpb/v7 v7.5.3 // indirect
	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
	github.com/vbauerster/mpb/v7 v7.3.2 // indirect
	github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b // indirect
	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
	go.etcd.io/bbolt v1.3.6 // indirect
	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
	go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
	go.opencensus.io v0.23.0 // indirect
	golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0 // indirect
	golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect
	golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 // indirect
	golang.org/x/net v0.7.0 // indirect
	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
	golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 // indirect
	golang.org/x/text v0.3.7 // indirect
	google.golang.org/genproto v0.0.0-20220720214146-176da50484ac // indirect
	google.golang.org/grpc v1.48.0 // indirect
	google.golang.org/protobuf v1.28.1 // indirect
	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	golang.org/x/sys v0.5.0 // indirect
	golang.org/x/term v0.5.0 // indirect
	golang.org/x/text v0.7.0 // indirect
	google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
	google.golang.org/grpc v1.42.0 // indirect
	google.golang.org/protobuf v1.27.1 // indirect
	gopkg.in/square/go-jose.v2 v2.5.1 // indirect
	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)

@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
cc -E - > /dev/null 2> /dev/null << EOF
#include <btrfs/ioctl.h>
EOF

@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
cc -E - > /dev/null 2> /dev/null << EOF
#include <btrfs/version.h>
EOF

@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
tmpdir="$PWD/tmp.$RANDOM"
mkdir -p "$tmpdir"
trap 'rm -fr "$tmpdir"' EXIT

@@ -34,7 +34,7 @@ if [[ "$SKOPEO_CONTAINER_TESTS" == "0" ]] && [[ "$CI" != "true" ]]; then
echo "    the Makefile targets WITHOUT the '-local' suffix."
echo "***************************************************************"
) > /dev/stderr
sleep 5
sleep 5s
fi

echo

@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
set -e

STATUS=$(git status --porcelain)

49
install.md
@@ -1,8 +1,7 @@
# Installing Skopeo

## Distribution Packages
`skopeo` may already be packaged in your distribution. This document lists the
installation steps for many distros, along with their information and support links.
`skopeo` may already be packaged in your distribution.

### Fedora

@@ -10,51 +9,30 @@ installation steps for many distros, along with their information and support li
sudo dnf -y install skopeo
```

[Package Info](https://src.fedoraproject.org/rpms/skopeo) and
[Bugzilla](https://bugzilla.redhat.com/buglist.cgi?bug_status=__open__&classification=Fedora&component=skopeo&product=Fedora)

Fedora bugs can be reported on the Skopeo GitHub [Issues](https://github.com/containers/skopeo/issues) page.

### RHEL / CentOS Stream ≥ 8
### RHEL/CentOS ≥ 8 and CentOS Stream

```sh
sudo dnf -y install skopeo
```

If you are a RHEL customer, please reach out through the official RHEL support
channels for any issues.

CentOS Stream 9: [Package Info](https://gitlab.com/redhat/centos-stream/rpms/skopeo/-/tree/c9s) and
[Bugzilla](https://bugzilla.redhat.com/buglist.cgi?bug_status=__open__&classification=Red%20Hat&component=skopeo&product=Red%20Hat%20Enterprise%20Linux%209&version=CentOS%20Stream)

CentOS Stream 8: [Package Info](https://git.centos.org/rpms/skopeo/tree/c8s-stream-rhel8) and
[Bugzilla](https://bugzilla.redhat.com/buglist.cgi?bug_status=__open__&classification=Red%20Hat&component=skopeo&product=Red%20Hat%20Enterprise%20Linux%208&version=CentOS%20Stream)


### RHEL/CentOS ≤ 7.x

```sh
sudo yum -y install skopeo
```

CentOS 7: [Package Repo](https://git.centos.org/rpms/skopeo/tree/c7-extras)

### openSUSE

```sh
sudo zypper install skopeo
```

[Package Info](https://software.opensuse.org/package/skopeo)

### Alpine

```sh
sudo apk add skopeo
```

[Package Info](https://pkgs.alpinelinux.org/packages?name=skopeo)

### macOS

```sh
@@ -66,8 +44,6 @@ brew install skopeo
$ nix-env -i skopeo
```

[Package Info](https://search.nixos.org/packages?&show=skopeo&query=skopeo)

### Debian

The skopeo package is available on [Bullseye](https://packages.debian.org/bullseye/skopeo),
@@ -79,8 +55,6 @@ sudo apt-get update
sudo apt-get -y install skopeo
```

[Package Info](https://packages.debian.org/stable/skopeo)

### Raspberry Pi OS arm64 (beta)

Raspberry Pi OS uses the standard Debian's repositories,
@@ -99,7 +73,17 @@ sudo apt-get -y update
sudo apt-get -y install skopeo
```

[Package Info](https://packages.ubuntu.com/jammy/skopeo)
The [Kubic project](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/skopeo)
provides packages for Ubuntu 20.04 (it should also work with direct derivatives like Pop!\_OS).

```bash
. /etc/os-release
echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/Release.key | sudo apt-key add -
sudo apt-get update
sudo apt-get -y upgrade
sudo apt-get -y install skopeo
```

### Windows
Skopeo has not yet been packaged for Windows. There is an [open feature
@@ -212,13 +196,6 @@ Building in a container is simpler, but more restrictive:
$ make binary
```

### Shell completion scripts

Skopeo has shell completion scripts for bash, zsh, fish and powershell. They are installed as part of `make install`.
You may have to restart your shell in order for them to take effect.

For instructions to manually generate and load the scripts please see `skopeo completion --help`.

### Installation

Finally, after the binary and documentation is built:

@@ -36,12 +36,12 @@ func (s *SkopeoSuite) SetUpSuite(c *check.C) {

func (s *SkopeoSuite) TearDownSuite(c *check.C) {
	if s.regV2 != nil {
		s.regV2.tearDown(c)
		s.regV2.Close()
	}
	if s.regV2WithAuth != nil {
		//cmd := exec.Command("docker", "logout", s.regV2WithAuth)
		//c.Assert(cmd.Run(), check.IsNil)
		s.regV2WithAuth.tearDown(c)
		s.regV2WithAuth.Close()
	}
}


@@ -6,7 +6,7 @@ import (
	"crypto/x509"
	"encoding/json"
	"fmt"
	"io/fs"
	"io/ioutil"
	"log"
	"net/http"
	"net/http/httptest"
@@ -31,7 +31,8 @@ const (
	v2DockerRegistryURL   = "localhost:5555" // Update also policy.json
	v2s1DockerRegistryURL = "localhost:5556"
	knownWindowsOnlyImage = "docker://mcr.microsoft.com/windows/nanoserver:1909"
	knownListImage        = "docker://registry.fedoraproject.org/fedora-minimal" // could have either ":latest" or "@sha256:..." appended
	knownListImageRepo    = "docker://registry.fedoraproject.org/fedora-minimal"
	knownListImage        = knownListImageRepo + ":38"
)
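
The constant change splits the fedora-minimal reference into a repo constant (for digest-based lookups) and a tag-pinned knownListImage. A minimal sketch of the two reference shapes the tests below combine (the digest value is a placeholder, not a real manifest digest):

```go
package main

import "fmt"

func main() {
	// Mirrors knownListImageRepo / knownListImage above.
	const repo = "docker://registry.fedoraproject.org/fedora-minimal"
	tagged := repo + ":38"                         // pinned-tag form
	byDigest := repo + "@sha256:<manifest digest>" // placeholder; the tests compute the real digest
	fmt.Println(tagged)
	fmt.Println(byDigest)
}
```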

type CopySuite struct {
@@ -64,7 +65,9 @@ func (s *CopySuite) SetUpSuite(c *check.C) {
	s.registry = setupRegistryV2At(c, v2DockerRegistryURL, false, false)
	s.s1Registry = setupRegistryV2At(c, v2s1DockerRegistryURL, false, true)

	s.gpgHome = c.MkDir()
	gpgHome, err := ioutil.TempDir("", "skopeo-gpg")
	c.Assert(err, check.IsNil)
	s.gpgHome = gpgHome
	os.Setenv("GNUPGHOME", s.gpgHome)

	for _, key := range []string{"personal", "official"} {
@@ -73,18 +76,21 @@ func (s *CopySuite) SetUpSuite(c *check.C) {
		runCommandWithInput(c, batchInput, gpgBinary, "--batch", "--gen-key")

		out := combinedOutputOfCommand(c, gpgBinary, "--armor", "--export", fmt.Sprintf("%s@example.com", key))
		err := os.WriteFile(filepath.Join(s.gpgHome, fmt.Sprintf("%s-pubkey.gpg", key)),
		err := ioutil.WriteFile(filepath.Join(s.gpgHome, fmt.Sprintf("%s-pubkey.gpg", key)),
			[]byte(out), 0600)
		c.Assert(err, check.IsNil)
	}
}

func (s *CopySuite) TearDownSuite(c *check.C) {
	if s.gpgHome != "" {
		os.RemoveAll(s.gpgHome)
	}
	if s.registry != nil {
		s.registry.tearDown(c)
		s.registry.Close()
	}
	if s.s1Registry != nil {
		s.s1Registry.tearDown(c)
		s.s1Registry.Close()
	}
	if s.cluster != nil {
		s.cluster.tearDown(c)
@@ -92,20 +98,32 @@ func (s *CopySuite) TearDownSuite(c *check.C) {
}

func (s *CopySuite) TestCopyWithManifestList(c *check.C) {
	dir := c.MkDir()
	dir, err := ioutil.TempDir("", "copy-manifest-list")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir)
	assertSkopeoSucceeds(c, "", "copy", knownListImage, "dir:"+dir)
}
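
These hunks repeatedly exchange go-check's self-cleaning `c.MkDir()` for explicit `ioutil.TempDir`/`os.RemoveAll` pairs. For reference, a standalone sketch of the explicit idiom in its modern spelling (`os.MkdirTemp` superseded `ioutil.TempDir` in Go 1.16; the prefix string is illustrative):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Create a scratch directory under the default temp root.
	dir, err := os.MkdirTemp("", "copy-manifest-list")
	if err != nil {
		panic(err)
	}
	// Unlike check.C.MkDir, cleanup is the caller's responsibility.
	defer os.RemoveAll(dir)
	fmt.Println("scratch dir:", dir)
}
```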

func (s *CopySuite) TestCopyAllWithManifestList(c *check.C) {
	dir := c.MkDir()
	dir, err := ioutil.TempDir("", "copy-all-manifest-list")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir)
	assertSkopeoSucceeds(c, "", "copy", "--all", knownListImage, "dir:"+dir)
}

func (s *CopySuite) TestCopyAllWithManifestListRoundTrip(c *check.C) {
	oci1 := c.MkDir()
	oci2 := c.MkDir()
	dir1 := c.MkDir()
	dir2 := c.MkDir()
	oci1, err := ioutil.TempDir("", "copy-all-manifest-list-oci")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(oci1)
	oci2, err := ioutil.TempDir("", "copy-all-manifest-list-oci")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(oci2)
	dir1, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir1)
	dir2, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir2)
	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", knownListImage, "oci:"+oci1)
	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "oci:"+oci1, "dir:"+dir1)
	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "dir:"+dir1, "oci:"+oci2)
@@ -116,10 +134,18 @@ func (s *CopySuite) TestCopyAllWithManifestListRoundTrip(c *check.C) {
}

func (s *CopySuite) TestCopyAllWithManifestListConverge(c *check.C) {
	oci1 := c.MkDir()
	oci2 := c.MkDir()
	dir1 := c.MkDir()
	dir2 := c.MkDir()
	oci1, err := ioutil.TempDir("", "copy-all-manifest-list-oci")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(oci1)
	oci2, err := ioutil.TempDir("", "copy-all-manifest-list-oci")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(oci2)
	dir1, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir1)
	dir2, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir2)
	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", knownListImage, "oci:"+oci1)
	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "oci:"+oci1, "dir:"+dir1)
	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "--format", "oci", knownListImage, "dir:"+dir2)
@@ -130,11 +156,13 @@ func (s *CopySuite) TestCopyAllWithManifestListConverge(c *check.C) {
}

func (s *CopySuite) TestCopyNoneWithManifestList(c *check.C) {
	dir1 := c.MkDir()
	dir1, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir1)
	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=index-only", knownListImage, "dir:"+dir1)

	manifestPath := filepath.Join(dir1, "manifest.json")
	readManifest, err := os.ReadFile(manifestPath)
	readManifest, err := ioutil.ReadFile(manifestPath)
	c.Assert(err, check.IsNil)
	mimeType := manifest.GuessMIMEType(readManifest)
	c.Assert(mimeType, check.Equals, "application/vnd.docker.distribution.manifest.list.v2+json")
@@ -143,10 +171,18 @@
}
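
TestCopyNoneWithManifestList checks the MIME type of the manifest written by `--multi-arch=index-only`. A self-contained sketch of the two containers/image helpers it leans on (assumes a manifest.json produced by `skopeo copy ... dir:<path>`; the path is illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/manifest"
)

func main() {
	raw, err := os.ReadFile("manifest.json") // illustrative path
	if err != nil {
		panic(err)
	}
	d, err := manifest.Digest(raw) // canonical digest of the raw manifest bytes
	if err != nil {
		panic(err)
	}
	fmt.Println(manifest.GuessMIMEType(raw), d)
}
```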

func (s *CopySuite) TestCopyWithManifestListConverge(c *check.C) {
	oci1 := c.MkDir()
	oci2 := c.MkDir()
	dir1 := c.MkDir()
	dir2 := c.MkDir()
	oci1, err := ioutil.TempDir("", "copy-all-manifest-list-oci")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(oci1)
	oci2, err := ioutil.TempDir("", "copy-all-manifest-list-oci")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(oci2)
	dir1, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir1)
	dir2, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir2)
	assertSkopeoSucceeds(c, "", "copy", knownListImage, "oci:"+oci1)
	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "oci:"+oci1, "dir:"+dir1)
	assertSkopeoSucceeds(c, "", "copy", "--format", "oci", knownListImage, "dir:"+dir2)
@@ -157,16 +193,24 @@ func (s *CopySuite) TestCopyWithManifestListConverge(c *check.C) {
}

func (s *CopySuite) TestCopyAllWithManifestListStorageFails(c *check.C) {
	storage := c.MkDir()
	storage, err := ioutil.TempDir("", "copy-storage")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(storage)
	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
	assertSkopeoFails(c, `.*destination transport .* does not support copying multiple images as a group.*`, "copy", "--multi-arch=all", knownListImage, "containers-storage:"+storage+"test")
}
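
The `[vfs@.../root+.../runroot]` prefix seen here and below selects the storage driver, graph root, and run root of a containers-storage reference. A minimal sketch of how these tests assemble it (the scratch path is illustrative; the tests use a temporary directory):

```go
package main

import "fmt"

func main() {
	// [driver@graphroot+runroot] precedes the image name, as in the tests.
	scratch := "/tmp/skopeo-storage"
	spec := fmt.Sprintf("[vfs@%s/root+%s/runroot]", scratch, scratch)
	fmt.Println("containers-storage:" + spec + "test")
}
```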

func (s *CopySuite) TestCopyWithManifestListStorage(c *check.C) {
	storage := c.MkDir()
	storage, err := ioutil.TempDir("", "copy-manifest-list-storage")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(storage)
	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
	dir1 := c.MkDir()
	dir2 := c.MkDir()
	dir1, err := ioutil.TempDir("", "copy-manifest-list-storage-dir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir1)
	dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-dir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir2)
	assertSkopeoSucceeds(c, "", "copy", knownListImage, "containers-storage:"+storage+"test")
	assertSkopeoSucceeds(c, "", "copy", knownListImage, "dir:"+dir1)
	assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test", "dir:"+dir2)
@@ -175,10 +219,16 @@ func (s *CopySuite) TestCopyWithManifestListStorage(c *check.C) {
}

func (s *CopySuite) TestCopyWithManifestListStorageMultiple(c *check.C) {
	storage := c.MkDir()
	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-multiple")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(storage)
	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
	dir1 := c.MkDir()
	dir2 := c.MkDir()
	dir1, err := ioutil.TempDir("", "copy-manifest-list-storage-multiple-dir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir1)
	dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-multiple-dir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir2)
	assertSkopeoSucceeds(c, "", "--override-arch", "amd64", "copy", knownListImage, "containers-storage:"+storage+"test")
	assertSkopeoSucceeds(c, "", "--override-arch", "arm64", "copy", knownListImage, "containers-storage:"+storage+"test")
	assertSkopeoSucceeds(c, "", "--override-arch", "arm64", "copy", knownListImage, "dir:"+dir1)
@@ -188,16 +238,24 @@ func (s *CopySuite) TestCopyWithManifestListStorageMultiple(c *check.C) {
}

func (s *CopySuite) TestCopyWithManifestListDigest(c *check.C) {
	dir1 := c.MkDir()
	dir2 := c.MkDir()
	oci1 := c.MkDir()
	oci2 := c.MkDir()
	dir1, err := ioutil.TempDir("", "copy-manifest-list-digest-dir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir1)
	dir2, err := ioutil.TempDir("", "copy-manifest-list-digest-dir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir2)
	oci1, err := ioutil.TempDir("", "copy-manifest-list-digest-oci")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(oci1)
	oci2, err := ioutil.TempDir("", "copy-manifest-list-digest-oci")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(oci2)
	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
	manifestDigest, err := manifest.Digest([]byte(m))
	c.Assert(err, check.IsNil)
	digest := manifestDigest.String()
	assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "dir:"+dir1)
	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", knownListImage+"@"+digest, "dir:"+dir2)
	assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "dir:"+dir1)
	assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", knownListImageRepo+"@"+digest, "dir:"+dir2)
	assertSkopeoSucceeds(c, "", "copy", "dir:"+dir1, "oci:"+oci1)
	assertSkopeoSucceeds(c, "", "copy", "dir:"+dir2, "oci:"+oci2)
	out := combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2)
@@ -205,50 +263,68 @@ func (s *CopySuite) TestCopyWithManifestListDigest(c *check.C) {
}

func (s *CopySuite) TestCopyWithDigestfileOutput(c *check.C) {
	tempdir := c.MkDir()
	dir1 := c.MkDir()
	tempdir, err := ioutil.TempDir("", "tempdir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(tempdir)
	dir1, err := ioutil.TempDir("", "copy-manifest-list-digest-dir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir1)
	digestOutPath := filepath.Join(tempdir, "digest.txt")
	assertSkopeoSucceeds(c, "", "copy", "--digestfile="+digestOutPath, knownListImage, "dir:"+dir1)
	readDigest, err := os.ReadFile(digestOutPath)
	readDigest, err := ioutil.ReadFile(digestOutPath)
	c.Assert(err, check.IsNil)
	_, err = digest.Parse(string(readDigest))
	c.Assert(err, check.IsNil)
}
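
TestCopyWithDigestfileOutput validates the file written by `--digestfile` with go-digest. A standalone sketch of that check (the digest literal is the well-known SHA-256 of the empty string, used only as a syntactically valid example):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// digest.Parse validates the "<algorithm>:<hex>" form that --digestfile writes.
	d, err := digest.Parse("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Algorithm(), d.Encoded())
}
```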

func (s *CopySuite) TestCopyWithManifestListStorageDigest(c *check.C) {
	storage := c.MkDir()
	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(storage)
	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
	dir1 := c.MkDir()
	dir2 := c.MkDir()
	dir1, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-dir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir1)
	dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-dir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir2)
	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
	manifestDigest, err := manifest.Digest([]byte(m))
	c.Assert(err, check.IsNil)
	digest := manifestDigest.String()
	assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
	assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
	assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test@"+digest, "dir:"+dir1)
	assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "dir:"+dir2)
	assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "dir:"+dir2)
	runDecompressDirs(c, "", dir1, dir2)
	assertDirImagesAreEqual(c, dir1, dir2)
}

func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArches(c *check.C) {
	storage := c.MkDir()
	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(storage)
	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
	dir1 := c.MkDir()
	dir2 := c.MkDir()
	dir1, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-dir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir1)
	dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-dir")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir2)
	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
	manifestDigest, err := manifest.Digest([]byte(m))
	c.Assert(err, check.IsNil)
	digest := manifestDigest.String()
	assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
	assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
	assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test@"+digest, "dir:"+dir1)
	assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "dir:"+dir2)
	assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "dir:"+dir2)
	runDecompressDirs(c, "", dir1, dir2)
	assertDirImagesAreEqual(c, dir1, dir2)
}

func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesBothUseListDigest(c *check.C) {
	storage := c.MkDir()
	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-multiple-arches-both")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(storage)
	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
	manifestDigest, err := manifest.Digest([]byte(m))
@@ -256,8 +332,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesBothUseLi
	digest := manifestDigest.String()
	_, err = manifest.ListFromBlob([]byte(m), manifest.GuessMIMEType([]byte(m)))
	c.Assert(err, check.IsNil)
	assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
	assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
	assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
	assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
	assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "containers-storage:"+storage+"test@"+digest)
	assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
	i2 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
@@ -268,7 +344,9 @@
}

func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesFirstUsesListDigest(c *check.C) {
	storage := c.MkDir()
	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-multiple-arches-first")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(storage)
	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
	manifestDigest, err := manifest.Digest([]byte(m))
@@ -280,8 +358,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesFirstUses
	c.Assert(err, check.IsNil)
	arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
	c.Assert(err, check.IsNil)
	assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
	assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+arm64Instance.String(), "containers-storage:"+storage+"test@"+arm64Instance.String())
	assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
	assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+arm64Instance.String(), "containers-storage:"+storage+"test@"+arm64Instance.String())
	i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
	var image1 imgspecv1.Image
	err = json.Unmarshal([]byte(i1), &image1)
@@ -302,7 +380,9 @@
}

func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesSecondUsesListDigest(c *check.C) {
	storage := c.MkDir()
	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-multiple-arches-second")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(storage)
	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
	manifestDigest, err := manifest.Digest([]byte(m))
@@ -314,8 +394,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesSecondUse
	c.Assert(err, check.IsNil)
	arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
	c.Assert(err, check.IsNil)
	assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
	assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
	assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
	assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
	i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String())
	var image1 imgspecv1.Image
	err = json.Unmarshal([]byte(i1), &image1)
@@ -336,7 +416,9 @@
}

func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesThirdUsesListDigest(c *check.C) {
	storage := c.MkDir()
	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-multiple-arches-third")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(storage)
	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
	manifestDigest, err := manifest.Digest([]byte(m))
@@ -348,9 +430,9 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesThirdUses
	c.Assert(err, check.IsNil)
	arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
	c.Assert(err, check.IsNil)
	assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
	assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
	assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
	assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
	assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
	assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
	assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
	i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String())
	var image1 imgspecv1.Image
@@ -370,7 +452,9 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesThirdUses
}

func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesTagAndDigest(c *check.C) {
	storage := c.MkDir()
	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-multiple-arches-tag-digest")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(storage)
	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
	manifestDigest, err := manifest.Digest([]byte(m))
@@ -383,7 +467,7 @@
	arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
	c.Assert(err, check.IsNil)
	assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage, "containers-storage:"+storage+"test:latest")
	assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
	assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
	assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
	i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test:latest")
	var image1 imgspecv1.Image
@@ -413,20 +497,28 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesTagAndDig
}

func (s *CopySuite) TestCopyFailsWhenImageOSDoesNotMatchRuntimeOS(c *check.C) {
	storage := c.MkDir()
	storage, err := ioutil.TempDir("", "copy-fails-image-does-not-match-runtime")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(storage)
	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
	assertSkopeoFails(c, `.*no image found in manifest list for architecture .*, variant .*, OS .*`, "copy", knownWindowsOnlyImage, "containers-storage:"+storage+"test")
}

func (s *CopySuite) TestCopySucceedsWhenImageDoesNotMatchRuntimeButWeOverride(c *check.C) {
	storage := c.MkDir()
	storage, err := ioutil.TempDir("", "copy-succeeds-image-does-not-match-runtime-but-override")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(storage)
	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
	assertSkopeoSucceeds(c, "", "--override-os=windows", "--override-arch=amd64", "copy", knownWindowsOnlyImage, "containers-storage:"+storage+"test")
}

func (s *CopySuite) TestCopySimpleAtomicRegistry(c *check.C) {
	dir1 := c.MkDir()
	dir2 := c.MkDir()
	dir1, err := ioutil.TempDir("", "copy-1")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir1)
	dir2, err := ioutil.TempDir("", "copy-2")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir2)

	// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
	// "pull": docker: → dir:
@@ -442,12 +534,16 @@
func (s *CopySuite) TestCopySimple(c *check.C) {
	const ourRegistry = "docker://" + v2DockerRegistryURL + "/"

	dir1 := c.MkDir()
	dir2 := c.MkDir()
	dir1, err := ioutil.TempDir("", "copy-1")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir1)
	dir2, err := ioutil.TempDir("", "copy-2")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir2)

	// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
	// "pull": docker: → dir:
	assertSkopeoSucceeds(c, "", "copy", "docker://k8s.gcr.io/pause", "dir:"+dir1)
	assertSkopeoSucceeds(c, "", "copy", "docker://registry.k8s.io/pause", "dir:"+dir1)
	// "push": dir: → docker(v2s2):
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, ourRegistry+"pause:unsigned")
	// The result of pushing and pulling is an unmodified image.
@@ -461,27 +557,44 @@
	ociDest := "pause-latest-image"
	ociImgName := "pause"
	defer os.RemoveAll(ociDest)
	assertSkopeoSucceeds(c, "", "copy", "docker://k8s.gcr.io/pause:latest", "oci:"+ociDest+":"+ociImgName)
	_, err := os.Stat(ociDest)
	assertSkopeoSucceeds(c, "", "copy", "docker://registry.k8s.io/pause:latest", "oci:"+ociDest+":"+ociImgName)
	_, err = os.Stat(ociDest)
	c.Assert(err, check.IsNil)

	// docker v2s2 -> OCI image layout without image name
	ociDest = "pause-latest-noimage"
	defer os.RemoveAll(ociDest)
	assertSkopeoSucceeds(c, "", "copy", "docker://k8s.gcr.io/pause:latest", "oci:"+ociDest)
	assertSkopeoSucceeds(c, "", "copy", "docker://registry.k8s.io/pause:latest", "oci:"+ociDest)
	_, err = os.Stat(ociDest)
	c.Assert(err, check.IsNil)
}

func (s *CopySuite) TestCopyEncryption(c *check.C) {
	originalImageDir := c.MkDir()
	encryptedImgDir := c.MkDir()
	decryptedImgDir := c.MkDir()
	keysDir := c.MkDir()
	undecryptedImgDir := c.MkDir()
	multiLayerImageDir := c.MkDir()
	partiallyEncryptedImgDir := c.MkDir()
	partiallyDecryptedImgDir := c.MkDir()

	originalImageDir, err := ioutil.TempDir("", "copy-1")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(originalImageDir)
	encryptedImgDir, err := ioutil.TempDir("", "copy-2")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(encryptedImgDir)
	decryptedImgDir, err := ioutil.TempDir("", "copy-3")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(decryptedImgDir)
	keysDir, err := ioutil.TempDir("", "copy-4")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(keysDir)
	undecryptedImgDir, err := ioutil.TempDir("", "copy-5")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(undecryptedImgDir)
	multiLayerImageDir, err := ioutil.TempDir("", "copy-6")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(multiLayerImageDir)
	partiallyEncryptedImgDir, err := ioutil.TempDir("", "copy-7")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(partiallyEncryptedImgDir)
	partiallyDecryptedImgDir, err := ioutil.TempDir("", "copy-8")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(partiallyDecryptedImgDir)

	// Create RSA key pair
	privateKey, err := rsa.GenerateKey(rand.Reader, 4096)
@@ -490,9 +603,9 @@
	privateKeyBytes := x509.MarshalPKCS1PrivateKey(privateKey)
	publicKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey)
	c.Assert(err, check.IsNil)
	err = os.WriteFile(keysDir+"/private.key", privateKeyBytes, 0644)
	err = ioutil.WriteFile(keysDir+"/private.key", privateKeyBytes, 0644)
	c.Assert(err, check.IsNil)
	err = os.WriteFile(keysDir+"/public.key", publicKeyBytes, 0644)
	err = ioutil.WriteFile(keysDir+"/public.key", publicKeyBytes, 0644)
	c.Assert(err, check.IsNil)

	// We can either perform encryption or decryption on the image.
@@ -516,7 +629,7 @@
	invalidPrivateKey, err := rsa.GenerateKey(rand.Reader, 4096)
	c.Assert(err, check.IsNil)
	invalidPrivateKeyBytes := x509.MarshalPKCS1PrivateKey(invalidPrivateKey)
	err = os.WriteFile(keysDir+"/invalid_private.key", invalidPrivateKeyBytes, 0644)
	err = ioutil.WriteFile(keysDir+"/invalid_private.key", invalidPrivateKeyBytes, 0644)
	c.Assert(err, check.IsNil)
	assertSkopeoFails(c, ".*no suitable key unwrapper found or none of the private keys could be used for decryption.*",
		"copy", "--decryption-key", keysDir+"/invalid_private.key",
@@ -556,7 +669,7 @@
}
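
The RSA setup in TestCopyEncryption reduces to a few crypto/x509 calls. A standalone sketch of the same key-material generation (file names mirror the test's keysDir layout; this illustrates the standard library, not a skopeo API):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"os"
)

func main() {
	// Throwaway 4096-bit RSA pair, DER-encoded like the test's key files.
	key, err := rsa.GenerateKey(rand.Reader, 4096)
	if err != nil {
		panic(err)
	}
	priv := x509.MarshalPKCS1PrivateKey(key)
	pub, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
	if err != nil {
		panic(err)
	}
	if err := os.WriteFile("private.key", priv, 0o644); err != nil {
		panic(err)
	}
	if err := os.WriteFile("public.key", pub, 0o644); err != nil {
		panic(err)
	}
}
```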

func matchLayerBlobBinaryType(c *check.C, ociImageDirPath string, contentType string, matchCount int) {
	files, err := os.ReadDir(ociImageDirPath)
	files, err := ioutil.ReadDir(ociImageDirPath)
	c.Assert(err, check.IsNil)

	foundCount := 0
@@ -592,7 +705,7 @@ func assertDirImagesAreEqual(c *check.C, dir1, dir2 string) {
	digests := []digest.Digest{}
	for _, dir := range []string{dir1, dir2} {
		manifestPath := filepath.Join(dir, "manifest.json")
		m, err := os.ReadFile(manifestPath)
		m, err := ioutil.ReadFile(manifestPath)
		c.Assert(err, check.IsNil)
		digest, err := manifest.Digest(m)
		c.Assert(err, check.IsNil)
@@ -610,7 +723,7 @@ func assertSchema1DirImagesAreEqualExceptNames(c *check.C, dir1, ref1, dir2, ref
	manifests := []map[string]interface{}{}
	for dir, ref := range map[string]string{dir1: ref1, dir2: ref2} {
		manifestPath := filepath.Join(dir, "manifest.json")
		m, err := os.ReadFile(manifestPath)
		m, err := ioutil.ReadFile(manifestPath)
		c.Assert(err, check.IsNil)
		data := map[string]interface{}{}
		err = json.Unmarshal(m, &data)
@@ -633,8 +746,12 @@

// Streaming (skopeo copy)
func (s *CopySuite) TestCopyStreaming(c *check.C) {
	dir1 := c.MkDir()
	dir2 := c.MkDir()
	dir1, err := ioutil.TempDir("", "streaming-1")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir1)
	dir2, err := ioutil.TempDir("", "streaming-2")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir2)

	// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
	// streaming: docker: → atomic:
@@ -654,8 +771,12 @@
func (s *CopySuite) TestCopyOCIRoundTrip(c *check.C) {
	const ourRegistry = "docker://" + v2DockerRegistryURL + "/"

	oci1 := c.MkDir()
	oci2 := c.MkDir()
	oci1, err := ioutil.TempDir("", "oci-1")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(oci1)
	oci2, err := ioutil.TempDir("", "oci-2")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(oci2)

	// Docker -> OCI
	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", testFQIN, "oci:"+oci1+":latest")
@@ -678,7 +799,7 @@
	// Verify using the upstream OCI image validator, this should catch most
	// non-compliance errors. DO NOT REMOVE THIS TEST UNLESS IT'S ABSOLUTELY
	// NECESSARY.
	err := image.ValidateLayout(oci1, nil, logger)
	err = image.ValidateLayout(oci1, nil, logger)
	c.Assert(err, check.IsNil)
	err = image.ValidateLayout(oci2, nil, logger)
	c.Assert(err, check.IsNil)
@@ -700,7 +821,9 @@ func (s *CopySuite) TestCopySignatures(c *check.C) {
		c.Skip(fmt.Sprintf("Signing not supported: %v", err))
	}

	dir := c.MkDir()
	dir, err := ioutil.TempDir("", "signatures-dest")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(dir)
	dirDest := "dir:" + dir

	policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome})
@@ -754,7 +877,9 @@ func (s *CopySuite) TestCopyDirSignatures(c *check.C) {
		c.Skip(fmt.Sprintf("Signing not supported: %v", err))
	}

	topDir := c.MkDir()
	topDir, err := ioutil.TempDir("", "dir-signatures-top")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(topDir)
	topDirDest := "dir:" + topDir

	for _, suffix := range []string{"/dir1", "/dir2", "/restricted/personal", "/restricted/official", "/restricted/badidentity", "/dest"} {
@@ -797,7 +922,9 @@ func (s *CopySuite) TestCopyDirSignatures(c *check.C) {
func (s *CopySuite) TestCopyCompression(c *check.C) {
	const uncompresssedLayerFile = "160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710"

	topDir := c.MkDir()
	topDir, err := ioutil.TempDir("", "compression-top")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(topDir)

	for i, t := range []struct{ fixture, remote string }{
		{"uncompressed-image-s1", "docker://" + v2DockerRegistryURL + "/compression/compression:s1"},
@@ -832,21 +959,21 @@

func findRegularFiles(c *check.C, root string) []string {
	result := []string{}
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
	err := filepath.Walk(root, filepath.WalkFunc(func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if d.Type().IsRegular() {
		if info.Mode().IsRegular() {
			result = append(result, path)
		}
		return nil
	})
	}))
	c.Assert(err, check.IsNil)
	return result
}
|
||||
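
findRegularFiles appears in both variants above: filepath.WalkDir (Go 1.16+) passes a lightweight fs.DirEntry and avoids a Stat call per entry, while the older filepath.Walk passes a full os.FileInfo. A minimal sketch of the WalkDir form:

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	// Print every regular file under the current directory.
	err := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.Type().IsRegular() {
			fmt.Println(path)
		}
		return nil
	})
	if err != nil {
		panic(err)
	}
}
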

// --sign-by and policy use for docker: with lookaside
func (s *CopySuite) TestCopyDockerLookaside(c *check.C) {
// --sign-by and policy use for docker: with sigstore
func (s *CopySuite) TestCopyDockerSigstore(c *check.C) {
mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
c.Assert(err, check.IsNil)
defer mech.Close()
@@ -856,19 +983,21 @@ func (s *CopySuite) TestCopyDockerLookaside(c *check.C) {

const ourRegistry = "docker://" + v2DockerRegistryURL + "/"

tmpDir := c.MkDir()
tmpDir, err := ioutil.TempDir("", "signatures-sigstore")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
copyDest := filepath.Join(tmpDir, "dest")
err = os.Mkdir(copyDest, 0755)
c.Assert(err, check.IsNil)
dirDest := "dir:" + copyDest
plainLookaside := filepath.Join(tmpDir, "lookaside")
splitLookasideStaging := filepath.Join(tmpDir, "lookaside-staging")
plainSigstore := filepath.Join(tmpDir, "sigstore")
splitSigstoreStaging := filepath.Join(tmpDir, "sigstore-staging")

splitLookasideReadServerHandler := http.NotFoundHandler()
splitLookasideReadServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
splitLookasideReadServerHandler.ServeHTTP(w, r)
splitSigstoreReadServerHandler := http.NotFoundHandler()
splitSigstoreReadServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
splitSigstoreReadServerHandler.ServeHTTP(w, r)
}))
defer splitLookasideReadServer.Close()
defer splitSigstoreReadServer.Close()

policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome})
defer os.Remove(policy)
@@ -876,20 +1005,20 @@ func (s *CopySuite) TestCopyDockerLookaside(c *check.C) {
err = os.Mkdir(registriesDir, 0755)
c.Assert(err, check.IsNil)
registriesFile := fileFromFixture(c, "fixtures/registries.yaml",
map[string]string{"@lookaside@": plainLookaside, "@split-staging@": splitLookasideStaging, "@split-read@": splitLookasideReadServer.URL})
map[string]string{"@sigstore@": plainSigstore, "@split-staging@": splitSigstoreStaging, "@split-read@": splitSigstoreReadServer.URL})
err = os.Symlink(registriesFile, filepath.Join(registriesDir, "registries.yaml"))
c.Assert(err, check.IsNil)

// Get an image to work with. Also verifies that we can use Docker repositories with no lookaside configured.
// Get an image to work with. Also verifies that we can use Docker repositories with no sigstore configured.
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", testFQIN, ourRegistry+"original/busybox")
// Pulling an unsigned image fails.
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
"--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"original/busybox", dirDest)

// Signing with lookaside defined succeeds,
// Signing with sigstore defined succeeds,
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "--sign-by", "personal@example.com", ourRegistry+"original/busybox", ourRegistry+"signed/busybox")
// a signature file has been created,
foundFiles := findRegularFiles(c, plainLookaside)
foundFiles := findRegularFiles(c, plainSigstore)
c.Assert(foundFiles, check.HasLen, 1)
// and pulling a signed image succeeds.
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"signed/busybox", dirDest)
@@ -897,19 +1026,19 @@ func (s *CopySuite) TestCopyDockerLookaside(c *check.C) {
// Deleting the image succeeds,
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "delete", ourRegistry+"signed/busybox")
// and the signature file has been deleted (but we leave the directories around).
foundFiles = findRegularFiles(c, plainLookaside)
foundFiles = findRegularFiles(c, plainSigstore)
c.Assert(foundFiles, check.HasLen, 0)

// Signing with a read/write lookaside split succeeds,
// Signing with a read/write sigstore split succeeds,
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "--sign-by", "personal@example.com", ourRegistry+"original/busybox", ourRegistry+"public/busybox")
// and a signature file has been created.
foundFiles = findRegularFiles(c, splitLookasideStaging)
foundFiles = findRegularFiles(c, splitSigstoreStaging)
c.Assert(foundFiles, check.HasLen, 1)
// Pulling the image fails because the read lookaside URL has not been populated:
// Pulling the image fails because the read sigstore URL has not been populated:
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
"--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"public/busybox", dirDest)
// Pulling the image succeeds after the read lookaside URL is available:
splitLookasideReadServerHandler = http.FileServer(http.Dir(splitLookasideStaging))
// Pulling the image succeeds after the read sigstore URL is available:
splitSigstoreReadServerHandler = http.FileServer(http.Dir(splitSigstoreStaging))
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"public/busybox", dirDest)
}

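
One detail worth noting in the test above: the read server is created once, but its handler is held in a variable that starts as a 404 handler and is swapped to an http.FileServer later in the test, so the same URL changes behavior without restarting the server. A standalone sketch of that pattern:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// The server body only delegates; reassigning `handler` later changes
	// what the already-running server serves.
	handler := http.NotFoundHandler()
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		handler.ServeHTTP(w, r)
	}))
	defer server.Close()

	resp, err := http.Get(server.URL)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println(resp.StatusCode) // 404: handler is still NotFoundHandler

	handler = http.FileServer(http.Dir(".")) // swap without restarting the server
	resp, err = http.Get(server.URL)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println(resp.StatusCode) // now answered by the file server
}
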
@@ -922,7 +1051,9 @@ func (s *CopySuite) TestCopyAtomicExtension(c *check.C) {
c.Skip(fmt.Sprintf("Signing not supported: %v", err))
}

topDir := c.MkDir()
topDir, err := ioutil.TempDir("", "atomic-extension")
c.Assert(err, check.IsNil)
defer os.RemoveAll(topDir)
for _, subdir := range []string{"dirAA", "dirAD", "dirDA", "dirDD", "registries.d"} {
err := os.MkdirAll(filepath.Join(topDir, subdir), 0755)
c.Assert(err, check.IsNil)
@@ -969,6 +1100,22 @@ func (s *CopySuite) TestCopyAtomicExtension(c *check.C) {
assertDirImagesAreEqual(c, filepath.Join(topDir, "dirDA"), filepath.Join(topDir, "dirDD"))
}

// copyWithSignedIdentity creates a copy of an unsigned image, adding a signature for an unrelated identity
// This should be easier than using standalone-sign.
func copyWithSignedIdentity(c *check.C, src, dest, signedIdentity, signBy, registriesDir string) {
topDir, err := ioutil.TempDir("", "copyWithSignedIdentity")
c.Assert(err, check.IsNil)
defer os.RemoveAll(topDir)

signingDir := filepath.Join(topDir, "signing-temp")
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", src, "dir:"+signingDir)
c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
assertSkopeoSucceeds(c, "^$", "standalone-sign", "-o", filepath.Join(signingDir, "signature-1"),
filepath.Join(signingDir, "manifest.json"), signedIdentity, signBy)
c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--dest-tls-verify=false", "dir:"+signingDir, dest)
}

// Both mirroring support in registries.conf, and mirrored remapIdentity support in policy.json
func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
const regPrefix = "docker://localhost:5006/myns/mirroring-"
@@ -980,14 +1127,16 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
c.Skip(fmt.Sprintf("Signing not supported: %v", err))
}

topDir := c.MkDir()
registriesDir := filepath.Join(topDir, "registries.d") // An empty directory to disable lookaside use
topDir, err := ioutil.TempDir("", "mirrored-signatures")
c.Assert(err, check.IsNil)
defer os.RemoveAll(topDir)
registriesDir := filepath.Join(topDir, "registries.d") // An empty directory to disable sigstore use
dirDest := "dir:" + filepath.Join(topDir, "unused-dest")

policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome})
defer os.Remove(policy)

// We use X-R-S-S for this testing to avoid having to deal with the lookasides.
// We use X-R-S-S for this testing to avoid having to deal with the sigstores.
// A downside is that OpenShift records signatures per image, so the error messages below
// list all signatures for other tags used for the same image as well.
// So, make sure to never create a signature that could be considered valid in a different part of the test (i.e. don't reuse tags).
@@ -1012,12 +1161,10 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
assertSkopeoFails(c, ".*Source image rejected: None of the signatures were accepted, reasons: Signature for identity localhost:5006/myns/mirroring-primary:direct is not accepted; Signature for identity localhost:5006/myns/mirroring-mirror:mirror-signed is not accepted.*",
"--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:mirror-signed", dirDest)

// Fail if we specify an unqualified identity
assertSkopeoFails(c, ".*Could not parse --sign-identity: repository name must be canonical.*",
"--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by=personal@example.com", "--sign-identity=this-is-not-fully-specified", regPrefix+"primary:unsigned", regPrefix+"mirror:primary-signed")

// Create a signature for mirroring-primary:primary-signed without pushing there.
assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by=personal@example.com", "--sign-identity=localhost:5006/myns/mirroring-primary:primary-signed", regPrefix+"primary:unsigned", regPrefix+"mirror:primary-signed")
copyWithSignedIdentity(c, regPrefix+"primary:unsigned", regPrefix+"mirror:primary-signed",
"localhost:5006/myns/mirroring-primary:primary-signed", "personal@example.com",
registriesDir)
// Verify that a correctly signed image for the primary is accessible using the primary's reference
assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:primary-signed", dirDest)
// … but verify that while it is accessible using the mirror location
@@ -1032,7 +1179,9 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
// … it is NOT accessible when requiring a signature …
assertSkopeoFails(c, ".*Source image rejected: None of the signatures were accepted, reasons: Signature for identity localhost:5006/myns/mirroring-primary:direct is not accepted; Signature for identity localhost:5006/myns/mirroring-mirror:mirror-signed is not accepted; Signature for identity localhost:5006/myns/mirroring-primary:primary-signed is not accepted.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest)
// … until signed.
assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by=personal@example.com", "--sign-identity=localhost:5006/myns/mirroring-primary:remapped", regPrefix+"remap:remapped", regPrefix+"remap:remapped")
copyWithSignedIdentity(c, regPrefix+"remap:remapped", regPrefix+"remap:remapped",
"localhost:5006/myns/mirroring-primary:remapped", "personal@example.com",
registriesDir)
assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest)
// To be extra clear about the semantics, verify that the signedPrefix (primary) location never exists
// and only the remapped prefix (mirror) is accessed.
@@ -1041,7 +1190,9 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {

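
The mirroring test depends on remapIdentity support in policy.json: a signature naming the primary repository is accepted for an image pulled from the mirror, because the expected identity prefix is rewritten before verification. A hedged sketch of what such a policy entry can look like (the key path and repository names are illustrative; containers-policy.json(5) is the authoritative reference):

package main

import "fmt"

// policyFragment is an illustrative policy.json entry, not the exact fixture
// the suite uses: pulls from the mirror repository are accepted when the
// signature names the primary repository.
const policyFragment = `{
  "docker": {
    "localhost:5006/myns/mirroring-mirror": [
      {
        "type": "signedBy",
        "keyType": "GPGKeys",
        "keyPath": "/keys/personal-pubkey.gpg",
        "signedIdentity": {
          "type": "remapIdentity",
          "prefix": "localhost:5006/myns/mirroring-mirror",
          "signedPrefix": "localhost:5006/myns/mirroring-primary"
        }
      }
    ]
  }
}`

func main() { fmt.Println(policyFragment) }
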
func (s *SkopeoSuite) TestCopySrcWithAuth(c *check.C) {
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", testFQIN, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
dir1 := c.MkDir()
dir1, err := ioutil.TempDir("", "copy-1")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir1)
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--src-creds=testuser:testpassword", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "dir:"+dir1)
}

@@ -1055,7 +1206,9 @@ func (s *SkopeoSuite) TestCopySrcAndDestWithAuth(c *check.C) {
}

func (s *CopySuite) TestCopyNoPanicOnHTTPResponseWithoutTLSVerifyFalse(c *check.C) {
topDir := c.MkDir()
topDir, err := ioutil.TempDir("", "no-panic-on-https-response-without-tls-verify-false")
c.Assert(err, check.IsNil)
defer os.RemoveAll(topDir)

const ourRegistry = "docker://" + v2DockerRegistryURL + "/"

@@ -1071,7 +1224,9 @@ func (s *CopySuite) TestCopySchemaConversion(c *check.C) {
}

func (s *CopySuite) TestCopyManifestConversion(c *check.C) {
topDir := c.MkDir()
topDir, err := ioutil.TempDir("", "manifest-conversion")
c.Assert(err, check.IsNil)
defer os.RemoveAll(topDir)
srcDir := filepath.Join(topDir, "source")
destDir1 := filepath.Join(topDir, "dest1")
destDir2 := filepath.Join(topDir, "dest2")
@@ -1095,14 +1250,18 @@ func (s *CopySuite) TestCopyManifestConversion(c *check.C) {
}

func (s *CopySuite) TestCopyPreserveDigests(c *check.C) {
topDir := c.MkDir()
topDir, err := ioutil.TempDir("", "preserve-digests")
c.Assert(err, check.IsNil)
defer os.RemoveAll(topDir)

assertSkopeoSucceeds(c, "", "copy", knownListImage, "--multi-arch=all", "--preserve-digests", "dir:"+topDir)
assertSkopeoFails(c, ".*Instructed to preserve digests.*", "copy", knownListImage, "--multi-arch=all", "--preserve-digests", "--format=oci", "dir:"+topDir)
}

func (s *CopySuite) testCopySchemaConversionRegistries(c *check.C, schema1Registry, schema2Registry string) {
topDir := c.MkDir()
topDir, err := ioutil.TempDir("", "schema-conversion")
c.Assert(err, check.IsNil)
defer os.RemoveAll(topDir)
for _, subdir := range []string{"input1", "input2", "dest2"} {
err := os.MkdirAll(filepath.Join(topDir, subdir), 0755)
c.Assert(err, check.IsNil)
@@ -1136,14 +1295,16 @@ func (s *CopySuite) testCopySchemaConversionRegistries(c *check.C, schema1Regist
const regConfFixture = "./fixtures/registries.conf"

func (s *SkopeoSuite) TestSuccessCopySrcWithMirror(c *check.C) {
dir := c.MkDir()
dir, err := ioutil.TempDir("", "copy-mirror")
c.Assert(err, check.IsNil)

assertSkopeoSucceeds(c, "", "--registries-conf="+regConfFixture, "copy",
"docker://mirror.invalid/busybox", "dir:"+dir)
}

func (s *SkopeoSuite) TestFailureCopySrcWithMirrorsUnavailable(c *check.C) {
dir := c.MkDir()
dir, err := ioutil.TempDir("", "copy-mirror")
c.Assert(err, check.IsNil)

// .invalid domains are, per RFC 6761, supposed to result in NXDOMAIN.
// With systemd-resolved (used only via NSS?), we instead seem to get “Temporary failure in name resolution”
@@ -1152,14 +1313,16 @@ func (s *SkopeoSuite) TestFailureCopySrcWithMirrorsUnavailable(c *check.C) {
}

func (s *SkopeoSuite) TestSuccessCopySrcWithMirrorAndPrefix(c *check.C) {
dir := c.MkDir()
dir, err := ioutil.TempDir("", "copy-mirror")
c.Assert(err, check.IsNil)

assertSkopeoSucceeds(c, "", "--registries-conf="+regConfFixture, "copy",
"docker://gcr.invalid/foo/bar/busybox", "dir:"+dir)
}

func (s *SkopeoSuite) TestFailureCopySrcWithMirrorAndPrefixUnavailable(c *check.C) {
dir := c.MkDir()
dir, err := ioutil.TempDir("", "copy-mirror")
c.Assert(err, check.IsNil)

// .invalid domains are, per RFC 6761, supposed to result in NXDOMAIN.
// With systemd-resolved (used only via NSS?), we instead seem to get “Temporary failure in name resolution”

@@ -1,6 +1,6 @@
docker:
localhost:5555:
lookaside: file://@lookaside@
sigstore: file://@sigstore@
localhost:5555/public:
lookaside-staging: file://@split-staging@
lookaside: @split-read@
sigstore-staging: file://@split-staging@
sigstore: @split-read@

@@ -5,13 +5,14 @@ import (
"context"
"encoding/base64"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"time"

"github.com/containers/storage/pkg/homedir"
"github.com/docker/docker/pkg/homedir"
"gopkg.in/check.v1"
)

@@ -32,7 +33,10 @@ type openshiftCluster struct {
// in isolated test environment.
func startOpenshiftCluster(c *check.C) *openshiftCluster {
cluster := &openshiftCluster{}
cluster.workingDir = c.MkDir()

dir, err := ioutil.TempDir("", "openshift-cluster")
c.Assert(err, check.IsNil)
cluster.workingDir = dir

cluster.startMaster(c)
cluster.prepareRegistryConfig(c)
@@ -192,7 +196,7 @@ func (cluster *openshiftCluster) startRegistry(c *check.C) {
// The default configuration currently already contains acceptschema2: false
})
// Make sure the configuration contains "acceptschema2: false", because eventually it will be enabled upstream and this function will need to be updated.
configContents, err := os.ReadFile(schema1Config)
configContents, err := ioutil.ReadFile(schema1Config)
c.Assert(err, check.IsNil)
c.Assert(string(configContents), check.Matches, "(?s).*acceptschema2: false.*")
cluster.processes = append(cluster.processes, cluster.startRegistryProcess(c, 5005, schema1Config))
@@ -236,7 +240,7 @@ func (cluster *openshiftCluster) dockerLogin(c *check.C) {
}`, port, authValue))
}
configJSON := `{"auths": {` + strings.Join(auths, ",") + `}}`
err = os.WriteFile(filepath.Join(cluster.dockerDir, "config.json"), []byte(configJSON), 0600)
err = ioutil.WriteFile(filepath.Join(cluster.dockerDir, "config.json"), []byte(configJSON), 0600)
c.Assert(err, check.IsNil)
}

@@ -254,12 +258,12 @@ func (cluster *openshiftCluster) relaxImageSignerPermissions(c *check.C) {
// tearDown stops the cluster services and deletes (only some!) of the state.
func (cluster *openshiftCluster) tearDown(c *check.C) {
for i := len(cluster.processes) - 1; i >= 0; i-- {
// It’s undocumented what Kill() returns if the process has terminated,
// so we couldn’t check just for that. This is running in a container anyway…
_ = cluster.processes[i].Process.Kill()
cluster.processes[i].Process.Kill()
}
if cluster.workingDir != "" {
os.RemoveAll(cluster.workingDir)
}
if cluster.dockerDir != "" {
err := os.RemoveAll(cluster.dockerDir)
c.Assert(err, check.IsNil)
os.RemoveAll(cluster.dockerDir)
}
}

@@ -15,15 +15,11 @@ TestRunShell is not really a test; it is a convenient way to use the registry se
in openshift.go and CopySuite to get an interactive environment for experimentation.

To use it, run:

sudo make shell

to start a container, then within the container:

SKOPEO_CONTAINER_TESTS=1 PS1='nested> ' go test -tags openshift_shell -timeout=24h ./integration -v -check.v -check.vv -check.f='CopySuite.TestRunShell'

An example of what can be done within the container:

cd ..; make bin/skopeo PREFIX=/usr install
./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://quay.io/libpod/busybox:latest atomic:localhost:5000/myns/personal:personal
oc get istag personal:personal -o json

@@ -3,7 +3,7 @@ package main
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
@@ -57,7 +57,7 @@ type pipefd struct {
fd *os.File
}

func (p *proxy) call(method string, args []interface{}) (rval interface{}, fd *pipefd, err error) {
func (self *proxy) call(method string, args []interface{}) (rval interface{}, fd *pipefd, err error) {
req := request{
Method: method,
Args: args,
@@ -66,7 +66,7 @@ func (p *proxy) call(method string, args []interface{}) (rval interface{}, fd *p
if err != nil {
return
}
n, err := p.c.Write(reqbuf)
n, err := self.c.Write(reqbuf)
if err != nil {
return
}
@@ -76,7 +76,7 @@ func (p *proxy) call(method string, args []interface{}) (rval interface{}, fd *p
}
oob := make([]byte, syscall.CmsgSpace(1))
replybuf := make([]byte, maxMsgSize)
n, oobn, _, _, err := p.c.ReadMsgUnix(replybuf, oob)
n, oobn, _, _, err := self.c.ReadMsgUnix(replybuf, oob)
if err != nil {
err = fmt.Errorf("reading reply: %v", err)
return
@@ -119,9 +119,9 @@ func (p *proxy) call(method string, args []interface{}) (rval interface{}, fd *p
return
}

func (p *proxy) callNoFd(method string, args []interface{}) (rval interface{}, err error) {
func (self *proxy) callNoFd(method string, args []interface{}) (rval interface{}, err error) {
var fd *pipefd
rval, fd, err = p.call(method, args)
rval, fd, err = self.call(method, args)
if err != nil {
return
}
@@ -132,9 +132,9 @@ func (p *proxy) callNoFd(method string, args []interface{}) (rval interface{}, e
return rval, nil
}

func (p *proxy) callReadAllBytes(method string, args []interface{}) (rval interface{}, buf []byte, err error) {
func (self *proxy) callReadAllBytes(method string, args []interface{}) (rval interface{}, buf []byte, err error) {
var fd *pipefd
rval, fd, err = p.call(method, args)
rval, fd, err = self.call(method, args)
if err != nil {
return
}
@@ -144,13 +144,13 @@ func (p *proxy) callReadAllBytes(method string, args []interface{}) (rval interf
}
fetchchan := make(chan byteFetch)
go func() {
manifestBytes, err := io.ReadAll(fd.fd)
manifestBytes, err := ioutil.ReadAll(fd.fd)
fetchchan <- byteFetch{
content: manifestBytes,
err: err,
}
}()
_, err = p.callNoFd("FinishPipe", []interface{}{fd.id})
_, err = self.callNoFd("FinishPipe", []interface{}{fd.id})
if err != nil {
return
}
@@ -241,7 +241,7 @@ func runTestGetManifestAndConfig(p *proxy, img string) error {
}
imgid := uint32(imgidv)

_, manifestBytes, err := p.callReadAllBytes("GetManifest", []interface{}{imgid})
v, manifestBytes, err := p.callReadAllBytes("GetManifest", []interface{}{imgid})
if err != nil {
return err
}
@@ -250,7 +250,7 @@ func runTestGetManifestAndConfig(p *proxy, img string) error {
return err
}

_, configBytes, err := p.callReadAllBytes("GetFullConfig", []interface{}{imgid})
v, configBytes, err := p.callReadAllBytes("GetFullConfig", []interface{}{imgid})
if err != nil {
return err
}
@@ -269,7 +269,7 @@ func runTestGetManifestAndConfig(p *proxy, img string) error {
}

// Also test this legacy interface
_, ctrconfigBytes, err := p.callReadAllBytes("GetConfig", []interface{}{imgid})
v, ctrconfigBytes, err := p.callReadAllBytes("GetConfig", []interface{}{imgid})
if err != nil {
return err
}
@@ -285,9 +285,6 @@ func runTestGetManifestAndConfig(p *proxy, img string) error {
}

_, err = p.callNoFd("CloseImage", []interface{}{imgid})
if err != nil {
return err
}

return nil
}

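
The hunks above rename the proxy methods' receiver between p and self. Go has no implicit self: the receiver is an ordinary parameter, and the usual convention (per the Go code review guidelines) is a short name derived from the type. A minimal sketch:

package main

import "fmt"

type proxy struct {
	name string
}

// Idiomatic Go: a short receiver name derived from the type ("p"), not "self";
// the receiver is just the method's first parameter.
func (p *proxy) describe() string {
	return fmt.Sprintf("proxy %q", p.name)
}

func main() {
	p := &proxy{name: "example"}
	fmt.Println(p.describe())
}
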
@@ -2,6 +2,7 @@ package main

import (
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
@@ -19,6 +20,7 @@ const (
type testRegistryV2 struct {
cmd *exec.Cmd
url string
dir string
username string
password string
email string
@@ -43,7 +45,10 @@ func setupRegistryV2At(c *check.C, url string, auth, schema1 bool) *testRegistry
}

func newTestRegistryV2At(c *check.C, url string, auth, schema1 bool) (*testRegistryV2, error) {
tmp := c.MkDir()
tmp, err := ioutil.TempDir("", "registry-test-")
if err != nil {
return nil, err
}
template := `version: 0.1
loglevel: debug
storage:
@@ -53,9 +58,6 @@
enabled: true
http:
addr: %s
compatibility:
schema1:
enabled: true
%s`
var (
htpasswd string
@@ -69,7 +71,7 @@ compatibility:
username = "testuser"
password = "testpassword"
email = "test@test.org"
if err := os.WriteFile(htpasswdPath, []byte(userpasswd), os.FileMode(0644)); err != nil {
if err := ioutil.WriteFile(htpasswdPath, []byte(userpasswd), os.FileMode(0644)); err != nil {
return nil, err
}
htpasswd = fmt.Sprintf(`auth:
@@ -84,18 +86,19 @@ compatibility:
return nil, err
}
if _, err := fmt.Fprintf(config, template, tmp, url, htpasswd); err != nil {
os.RemoveAll(tmp)
return nil, err
}

var cmd *exec.Cmd
binary := binaryV2
if schema1 {
cmd = exec.Command(binaryV2Schema1, confPath)
} else {
cmd = exec.Command(binaryV2, "serve", confPath)
binary = binaryV2Schema1
}

cmd := exec.Command(binary, confPath)
consumeAndLogOutputs(c, fmt.Sprintf("registry-%s", url), cmd)
if err := cmd.Start(); err != nil {
os.RemoveAll(tmp)
if os.IsNotExist(err) {
c.Skip(err.Error())
}
@@ -104,6 +107,7 @@ compatibility:
return &testRegistryV2{
cmd: cmd,
url: url,
dir: tmp,
username: username,
password: password,
email: email,
@@ -122,8 +126,7 @@ func (t *testRegistryV2) Ping() error {
return nil
}

func (t *testRegistryV2) tearDown(c *check.C) {
// It’s undocumented what Kill() returns if the process has terminated,
// so we couldn’t check just for that. This is running in a container anyway…
_ = t.cmd.Process.Kill()
func (t *testRegistryV2) Close() {
t.cmd.Process.Kill()
os.RemoveAll(t.dir)
}

@@ -3,6 +3,7 @@ package main
import (
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"strings"
@@ -20,6 +21,7 @@ func init() {
}

type SigningSuite struct {
gpgHome string
fingerprint string
}

@@ -38,18 +40,25 @@ func (s *SigningSuite) SetUpSuite(c *check.C) {
_, err := exec.LookPath(skopeoBinary)
c.Assert(err, check.IsNil)

gpgHome := c.MkDir()
os.Setenv("GNUPGHOME", gpgHome)
s.gpgHome, err = ioutil.TempDir("", "skopeo-gpg")
c.Assert(err, check.IsNil)
os.Setenv("GNUPGHOME", s.gpgHome)

runCommandWithInput(c, "Key-Type: RSA\nName-Real: Testing user\n%no-protection\n%commit\n", gpgBinary, "--homedir", gpgHome, "--batch", "--gen-key")
runCommandWithInput(c, "Key-Type: RSA\nName-Real: Testing user\n%no-protection\n%commit\n", gpgBinary, "--homedir", s.gpgHome, "--batch", "--gen-key")

lines, err := exec.Command(gpgBinary, "--homedir", gpgHome, "--with-colons", "--no-permission-warning", "--fingerprint").Output()
lines, err := exec.Command(gpgBinary, "--homedir", s.gpgHome, "--with-colons", "--no-permission-warning", "--fingerprint").Output()
c.Assert(err, check.IsNil)
s.fingerprint, err = findFingerprint(lines)
c.Assert(err, check.IsNil)
}

func (s *SigningSuite) TearDownSuite(c *check.C) {
if s.gpgHome != "" {
err := os.RemoveAll(s.gpgHome)
c.Assert(err, check.IsNil)
}
s.gpgHome = ""

os.Unsetenv("GNUPGHOME")
}

@@ -64,7 +73,7 @@ func (s *SigningSuite) TestSignVerifySmoke(c *check.C) {
manifestPath := "fixtures/image.manifest.json"
dockerReference := "testing/smoketest"

sigOutput, err := os.CreateTemp("", "sig")
sigOutput, err := ioutil.TempFile("", "sig")
c.Assert(err, check.IsNil)
defer os.Remove(sigOutput.Name())
assertSkopeoSucceeds(c, "^$", "standalone-sign", "-o", sigOutput.Name(),

@@ -3,7 +3,7 @@ package main
import (
"context"
"fmt"
"io/fs"
"io/ioutil"
"os"
"path"
"path/filepath"
@@ -22,15 +22,15 @@ const (
// A repository with a path with multiple components in it which
// contains multiple tags, preferably with some tags pointing to
// manifest lists, and with some tags that don't.
pullableRepo = "k8s.gcr.io/coredns/coredns"
pullableRepo = "quay.io/libpod/testimage"
// A tagged image in the repository that we can inspect and copy.
pullableTaggedImage = "k8s.gcr.io/coredns/coredns:v1.6.6"
pullableTaggedImage = "registry.k8s.io/coredns/coredns:v1.6.6"
// A tagged manifest list in the repository that we can inspect and copy.
pullableTaggedManifestList = "k8s.gcr.io/coredns/coredns:v1.8.0"
pullableTaggedManifestList = "registry.k8s.io/coredns/coredns:v1.8.0"
// A repository containing multiple tags, some of which are for
// manifest lists, and which includes a "latest" tag. We specify the
// name here without a tag.
pullableRepoWithLatestTag = "k8s.gcr.io/pause"
pullableRepoWithLatestTag = "registry.k8s.io/pause"
)

func init() {
@@ -40,6 +40,7 @@ func init() {
type SyncSuite struct {
cluster *openshiftCluster
registry *testRegistryV2
gpgHome string
}

func (s *SyncSuite) SetUpSuite(c *check.C) {
@@ -73,8 +74,10 @@ func (s *SyncSuite) SetUpSuite(c *check.C) {
// FIXME: Set up TLS for the docker registry port instead of using "--tls-verify=false" all over the place.
s.registry = setupRegistryV2At(c, v2DockerRegistryURL, registryAuth, registrySchema1)

gpgHome := c.MkDir()
os.Setenv("GNUPGHOME", gpgHome)
gpgHome, err := ioutil.TempDir("", "skopeo-gpg")
c.Assert(err, check.IsNil)
s.gpgHome = gpgHome
os.Setenv("GNUPGHOME", s.gpgHome)

for _, key := range []string{"personal", "official"} {
batchInput := fmt.Sprintf("Key-Type: RSA\nName-Real: Test key - %s\nName-email: %s@example.com\n%%no-protection\n%%commit\n",
@@ -82,7 +85,7 @@ func (s *SyncSuite) SetUpSuite(c *check.C) {
runCommandWithInput(c, batchInput, gpgBinary, "--batch", "--gen-key")

out := combinedOutputOfCommand(c, gpgBinary, "--armor", "--export", fmt.Sprintf("%s@example.com", key))
err := os.WriteFile(filepath.Join(gpgHome, fmt.Sprintf("%s-pubkey.gpg", key)),
err := ioutil.WriteFile(filepath.Join(s.gpgHome, fmt.Sprintf("%s-pubkey.gpg", key)),
[]byte(out), 0600)
c.Assert(err, check.IsNil)
}
@@ -93,32 +96,21 @@ func (s *SyncSuite) TearDownSuite(c *check.C) {
return
}

if s.gpgHome != "" {
os.RemoveAll(s.gpgHome)
}
if s.registry != nil {
s.registry.tearDown(c)
s.registry.Close()
}
if s.cluster != nil {
s.cluster.tearDown(c)
}
}

func assertNumberOfManifestsInSubdirs(c *check.C, dir string, expectedCount int) {
nManifests := 0
err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if !d.IsDir() && d.Name() == "manifest.json" {
nManifests++
return filepath.SkipDir
}
return nil
})
c.Assert(err, check.IsNil)
c.Assert(nManifests, check.Equals, expectedCount)
}

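
A subtlety in assertNumberOfManifestsInSubdirs above: returning filepath.SkipDir from a visit of a non-directory entry skips the remaining entries of the containing directory, so each image directory is counted at most once. A small sketch of that behavior (walking the current directory for illustration):

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	count := 0
	// Count manifest.json files, at most one per directory: returning SkipDir
	// from a *file* visit skips the remaining entries of its parent directory.
	err := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if !d.IsDir() && d.Name() == "manifest.json" {
			count++
			return filepath.SkipDir
		}
		return nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(count)
}
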
func (s *SyncSuite) TestDocker2DirTagged(c *check.C) {
tmpDir := c.MkDir()
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)

// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableTaggedImage
@@ -144,7 +136,9 @@ func (s *SyncSuite) TestDocker2DirTagged(c *check.C) {
}

func (s *SyncSuite) TestDocker2DirTaggedAll(c *check.C) {
tmpDir := c.MkDir()
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)

// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableTaggedManifestList
@@ -170,14 +164,16 @@ func (s *SyncSuite) TestDocker2DirTaggedAll(c *check.C) {
}

func (s *SyncSuite) TestPreserveDigests(c *check.C) {
tmpDir := c.MkDir()
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)

// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableTaggedManifestList

// copy docker => dir
assertSkopeoSucceeds(c, "", "copy", "--all", "--preserve-digests", "docker://"+image, "dir:"+tmpDir)
_, err := os.Stat(path.Join(tmpDir, "manifest.json"))
_, err = os.Stat(path.Join(tmpDir, "manifest.json"))
c.Assert(err, check.IsNil)

assertSkopeoFails(c, ".*Instructed to preserve digests.*", "copy", "--all", "--preserve-digests", "--format=oci", "docker://"+image, "dir:"+tmpDir)
@@ -190,7 +186,8 @@ func (s *SyncSuite) TestScoped(c *check.C) {
c.Assert(err, check.IsNil)
imagePath := imageRef.DockerReference().String()

dir1 := c.MkDir()
dir1, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "sync", "--src", "docker", "--dest", "dir", image, dir1)
_, err = os.Stat(path.Join(dir1, path.Base(imagePath), "manifest.json"))
c.Assert(err, check.IsNil)
@@ -198,6 +195,8 @@ func (s *SyncSuite) TestScoped(c *check.C) {
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "docker", "--dest", "dir", image, dir1)
_, err = os.Stat(path.Join(dir1, imagePath, "manifest.json"))
c.Assert(err, check.IsNil)

os.RemoveAll(dir1)
}

func (s *SyncSuite) TestDirIsNotOverwritten(c *check.C) {
@@ -211,7 +210,8 @@ func (s *SyncSuite) TestDirIsNotOverwritten(c *check.C) {
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://"+image, "docker://"+path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())))

//sync upstream image to dir, not scoped
dir1 := c.MkDir()
dir1, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "sync", "--src", "docker", "--dest", "dir", image, dir1)
_, err = os.Stat(path.Join(dir1, path.Base(imagePath), "manifest.json"))
c.Assert(err, check.IsNil)
@@ -226,10 +226,14 @@ func (s *SyncSuite) TestDirIsNotOverwritten(c *check.C) {
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())), dir1)
_, err = os.Stat(path.Join(dir1, imagePath, "manifest.json"))
c.Assert(err, check.IsNil)
os.RemoveAll(dir1)
}

func (s *SyncSuite) TestDocker2DirUntagged(c *check.C) {
tmpDir := c.MkDir()

tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)

// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableRepo
@@ -251,7 +255,9 @@ func (s *SyncSuite) TestDocker2DirUntagged(c *check.C) {
}

func (s *SyncSuite) TestYamlUntagged(c *check.C) {
tmpDir := c.MkDir()
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
dir1 := path.Join(tmpDir, "dir1")

image := pullableRepo
@@ -272,8 +278,7 @@ func (s *SyncSuite) TestYamlUntagged(c *check.C) {

// sync to the local registry
yamlFile := path.Join(tmpDir, "registries.yaml")
err = os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
c.Assert(err, check.IsNil)
ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "docker", "--dest-tls-verify=false", yamlFile, v2DockerRegistryURL)
// sync back from local registry to a folder
os.Remove(yamlFile)
@@ -284,8 +289,7 @@ func (s *SyncSuite) TestYamlUntagged(c *check.C) {
%s: []
`, v2DockerRegistryURL, imagePath)

err = os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
c.Assert(err, check.IsNil)
ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)

sysCtx = types.SystemContext{
@@ -297,15 +301,31 @@ func (s *SyncSuite) TestYamlUntagged(c *check.C) {
c.Assert(err, check.IsNil)
c.Check(len(localTags), check.Not(check.Equals), 0)
c.Assert(len(localTags), check.Equals, len(tags))
assertNumberOfManifestsInSubdirs(c, dir1, len(tags))

nManifests := 0
//count the number of manifest.json in dir1
err = filepath.Walk(dir1, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() && info.Name() == "manifest.json" {
nManifests++
return filepath.SkipDir
}
return nil
})
c.Assert(err, check.IsNil)
c.Assert(nManifests, check.Equals, len(tags))
}

func (s *SyncSuite) TestYamlRegex2Dir(c *check.C) {
tmpDir := c.MkDir()
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
dir1 := path.Join(tmpDir, "dir1")

yamlConfig := `
k8s.gcr.io:
registry.k8s.io:
images-by-tag-regex:
pause: ^[12]\.0$ # regex string test
`
@@ -314,35 +334,63 @@ k8s.gcr.io:
c.Assert(nTags, check.Not(check.Equals), 0)

yamlFile := path.Join(tmpDir, "registries.yaml")
err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
c.Assert(err, check.IsNil)
ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
assertNumberOfManifestsInSubdirs(c, dir1, nTags)

nManifests := 0
err = filepath.Walk(dir1, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() && info.Name() == "manifest.json" {
nManifests++
return filepath.SkipDir
}
return nil
})
c.Assert(err, check.IsNil)
c.Assert(nManifests, check.Equals, nTags)
}

func (s *SyncSuite) TestYamlDigest2Dir(c *check.C) {
tmpDir := c.MkDir()
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
dir1 := path.Join(tmpDir, "dir1")

yamlConfig := `
k8s.gcr.io:
registry.k8s.io:
images:
pause:
- sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610
`
yamlFile := path.Join(tmpDir, "registries.yaml")
err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
c.Assert(err, check.IsNil)
ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
assertNumberOfManifestsInSubdirs(c, dir1, 1)

nManifests := 0
err = filepath.Walk(dir1, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() && info.Name() == "manifest.json" {
nManifests++
return filepath.SkipDir
}
return nil
})
c.Assert(err, check.IsNil)
c.Assert(nManifests, check.Equals, 1)
}

func (s *SyncSuite) TestYaml2Dir(c *check.C) {
tmpDir := c.MkDir()
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
dir1 := path.Join(tmpDir, "dir1")

yamlConfig := `
k8s.gcr.io:
registry.k8s.io:
images:
coredns/coredns:
- v1.8.0
@@ -369,15 +417,29 @@ quay.io:
c.Assert(nTags, check.Not(check.Equals), 0)

yamlFile := path.Join(tmpDir, "registries.yaml")
err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
c.Assert(err, check.IsNil)
ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
assertNumberOfManifestsInSubdirs(c, dir1, nTags)

nManifests := 0
err = filepath.Walk(dir1, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() && info.Name() == "manifest.json" {
nManifests++
return filepath.SkipDir
}
return nil
})
c.Assert(err, check.IsNil)
c.Assert(nManifests, check.Equals, nTags)
}

func (s *SyncSuite) TestYamlTLSVerify(c *check.C) {
const localRegURL = "docker://" + v2DockerRegistryURL + "/"
tmpDir := c.MkDir()
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
dir1 := path.Join(tmpDir, "dir1")
image := pullableRepoWithLatestTag
tag := "latest"
@@ -419,8 +481,7 @@ func (s *SyncSuite) TestYamlTLSVerify(c *check.C) {
for _, cfg := range testCfg {
yamlConfig := fmt.Sprintf(yamlTemplate, v2DockerRegistryURL, cfg.tlsVerify, image, tag)
yamlFile := path.Join(tmpDir, "registries.yaml")
err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
c.Assert(err, check.IsNil)
ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)

cfg.checker(c, cfg.msg, "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
os.Remove(yamlFile)
@@ -430,7 +491,9 @@ func (s *SyncSuite) TestYamlTLSVerify(c *check.C) {
}

func (s *SyncSuite) TestSyncManifestOutput(c *check.C) {
tmpDir := c.MkDir()
tmpDir, err := ioutil.TempDir("", "sync-manifest-output")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)

destDir1 := filepath.Join(tmpDir, "dest1")
destDir2 := filepath.Join(tmpDir, "dest2")
@@ -450,7 +513,9 @@ func (s *SyncSuite) TestSyncManifestOutput(c *check.C) {
func (s *SyncSuite) TestDocker2DockerTagged(c *check.C) {
const localRegURL = "docker://" + v2DockerRegistryURL + "/"

tmpDir := c.MkDir()
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)

// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableTaggedImage
@@ -481,13 +546,15 @@ func (s *SyncSuite) TestDocker2DockerTagged(c *check.C) {
func (s *SyncSuite) TestDir2DockerTagged(c *check.C) {
const localRegURL = "docker://" + v2DockerRegistryURL + "/"

tmpDir := c.MkDir()
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)

// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableRepoWithLatestTag

dir1 := path.Join(tmpDir, "dir1")
err := os.Mkdir(dir1, 0755)
err = os.Mkdir(dir1, 0755)
c.Assert(err, check.IsNil)
dir2 := path.Join(tmpDir, "dir2")
err = os.Mkdir(dir2, 0755)
@@ -519,7 +586,9 @@ func (s *SyncSuite) TestDir2DockerTagged(c *check.C) {
}

func (s *SyncSuite) TestFailsWithDir2Dir(c *check.C) {
tmpDir := c.MkDir()
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)

dir1 := path.Join(tmpDir, "dir1")
dir2 := path.Join(tmpDir, "dir2")
@@ -529,7 +598,9 @@ func (s *SyncSuite) TestFailsWithDir2Dir(c *check.C) {
}

func (s *SyncSuite) TestFailsNoSourceImages(c *check.C) {
tmpDir := c.MkDir()
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)

assertSkopeoFails(c, ".*No images to sync found in .*",
"sync", "--scoped", "--dest-tls-verify=false", "--src", "dir", "--dest", "docker", tmpDir, v2DockerRegistryURL)
@@ -541,7 +612,9 @@ func (s *SyncSuite) TestFailsNoSourceImages(c *check.C) {
func (s *SyncSuite) TestFailsWithDockerSourceNoRegistry(c *check.C) {
const regURL = "google.com/namespace/imagename"

tmpDir := c.MkDir()
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)

//untagged
assertSkopeoFails(c, ".*invalid status code from registry 404.*",
@@ -554,7 +627,9 @@ func (s *SyncSuite) TestFailsWithDockerSourceNoRegistry(c *check.C) {

func (s *SyncSuite) TestFailsWithDockerSourceUnauthorized(c *check.C) {
const repo = "privateimagenamethatshouldnotbepublic"
tmpDir := c.MkDir()
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)

//untagged
assertSkopeoFails(c, ".*Registry disallows tag list retrieval.*",
@@ -567,7 +642,9 @@ func (s *SyncSuite) TestFailsWithDockerSourceUnauthorized(c *check.C) {

func (s *SyncSuite) TestFailsWithDockerSourceNotExisting(c *check.C) {
repo := path.Join(v2DockerRegistryURL, "imagedoesnotexist")
tmpDir := c.MkDir()
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)

//untagged
assertSkopeoFails(c, ".*invalid status code from registry 404.*",
@@ -580,9 +657,9 @@ func (s *SyncSuite) TestFailsWithDockerSourceNotExisting(c *check.C) {

func (s *SyncSuite) TestFailsWithDirSourceNotExisting(c *check.C) {
// Make sure the dir does not exist!
tmpDir := c.MkDir()
tmpDir = filepath.Join(tmpDir, "this-does-not-exist")
err := os.RemoveAll(tmpDir)
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
err = os.RemoveAll(tmpDir)
c.Assert(err, check.IsNil)
_, err = os.Stat(path.Join(tmpDir))
c.Check(os.IsNotExist(err), check.Equals, true)

@@ -3,8 +3,8 @@ package main
import (
"bytes"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"strings"
@@ -163,15 +163,15 @@ func modifyEnviron(env []string, name, value string) []string {
// fileFromFixture applies edits to inputPath and returns a path to the temporary file.
// Callers should defer os.Remove(the_returned_path)
func fileFromFixture(c *check.C, inputPath string, edits map[string]string) string {
contents, err := os.ReadFile(inputPath)
contents, err := ioutil.ReadFile(inputPath)
c.Assert(err, check.IsNil)
for template, value := range edits {
updated := bytes.ReplaceAll(contents, []byte(template), []byte(value))
updated := bytes.Replace(contents, []byte(template), []byte(value), -1)
c.Assert(bytes.Equal(updated, contents), check.Equals, false, check.Commentf("Replacing %s in %#v failed", template, string(contents))) // Verify that the template has matched something and we are not silently ignoring it.
contents = updated
}

file, err := os.CreateTemp("", "policy.json")
file, err := ioutil.TempFile("", "policy.json")
c.Assert(err, check.IsNil)
path := file.Name()

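
The fileFromFixture hunk swaps bytes.Replace(contents, old, new, -1) for bytes.ReplaceAll(contents, old, new); the two calls are equivalent, ReplaceAll (Go 1.12+) being the readable spelling of the n = -1 case. A minimal sketch:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	in := []byte("lookaside: @dir@; staging: @dir@")
	oldB, newB := []byte("@dir@"), []byte("/tmp/lookaside")

	a := bytes.ReplaceAll(in, oldB, newB)  // Go 1.12+
	b := bytes.Replace(in, oldB, newB, -1) // equivalent older spelling
	fmt.Println(bytes.Equal(a, b))         // true
}
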
@@ -187,7 +187,7 @@ func fileFromFixture(c *check.C, inputPath string, edits map[string]string) stri
func runDecompressDirs(c *check.C, regexp string, args ...string) {
c.Logf("Running %s %s", decompressDirsBinary, strings.Join(args, " "))
for i, dir := range args {
m, err := os.ReadFile(filepath.Join(dir, "manifest.json"))
m, err := ioutil.ReadFile(filepath.Join(dir, "manifest.json"))
c.Assert(err, check.IsNil)
c.Logf("manifest %d before: %s", i+1, string(m))
}
@@ -197,7 +197,7 @@ func runDecompressDirs(c *check.C, regexp string, args ...string) {
if len(out) > 0 {
c.Logf("output: %s", out)
}
m, err := os.ReadFile(filepath.Join(dir, "manifest.json"))
m, err := ioutil.ReadFile(filepath.Join(dir, "manifest.json"))
c.Assert(err, check.IsNil)
c.Logf("manifest %d after: %s", i+1, string(m))
}
@@ -208,7 +208,7 @@ func runDecompressDirs(c *check.C, regexp string, args ...string) {

// Verify manifest in a dir: image at dir is expectedMIMEType.
func verifyManifestMIMEType(c *check.C, dir string, expectedMIMEType string) {
manifestBlob, err := os.ReadFile(filepath.Join(dir, "manifest.json"))
manifestBlob, err := ioutil.ReadFile(filepath.Join(dir, "manifest.json"))
c.Assert(err, check.IsNil)
mimeType := manifest.GuessMIMEType(manifestBlob)
c.Assert(mimeType, check.Equals, expectedMIMEType)

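
verifyManifestMIMEType above relies on manifest.GuessMIMEType from the containers/image library, which inspects the raw manifest bytes and returns its best-guess MIME type, or an empty string when the format is unrecognized. A small usage sketch (the directory path is illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/containers/image/v5/manifest"
)

func main() {
	blob, err := os.ReadFile(filepath.Join("/tmp/image-dir", "manifest.json"))
	if err != nil {
		panic(err)
	}
	// Prints e.g. "application/vnd.docker.distribution.manifest.v2+json",
	// or "" when the format is not recognized.
	fmt.Println(manifest.GuessMIMEType(blob))
}
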
132
skopeo.spec.rpkg
@@ -1,132 +0,0 @@
# For automatic rebuilds in COPR

# The following tag is to get correct syntax highlighting for this file in vim text editor
# vim: syntax=spec

# Any additional comments should go below this line or else syntax highlighting
# may not work.

# CAUTION: This is not a replacement for RPMs provided by your distro.
# Only intended to build and test the latest unreleased changes.

%global gomodulesmode GO111MODULE=on
%global with_debug 1

%if 0%{?with_debug}
%global _find_debuginfo_dwz_opts %{nil}
%global _dwz_low_mem_die_limit 0
%else
%global debug_package %{nil}
%endif

%if ! 0%{?gobuild:1}
%define gobuild(o:) go build -buildmode pie -compiler gc -tags="rpm_crashtraceback ${BUILDTAGS:-}" -ldflags "${LDFLAGS:-} -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n') -extldflags '-Wl,-z,relro -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld '" -a -v -x %{?**};
%endif

Name: {{{ git_dir_name }}}
Epoch: 101
Version: {{{ git_dir_version }}}
Release: 1%{?dist}
Summary: Inspect container images and repositories on registries
License: ASL 2.0
URL: https://github.com/containers/skopeo
VCS: {{{ git_dir_vcs }}}
Source: {{{ git_dir_pack }}}
%if 0%{?fedora} && ! 0%{?rhel}
BuildRequires: btrfs-progs-devel
%endif
BuildRequires: golang >= 1.16.6
BuildRequires: glib2-devel
BuildRequires: git-core
BuildRequires: go-md2man
%if 0%{?fedora} || 0%{?rhel} >= 9
BuildRequires: go-rpm-macros
%endif
BuildRequires: pkgconfig(devmapper)
BuildRequires: gpgme-devel
BuildRequires: libassuan-devel
BuildRequires: pkgconfig
BuildRequires: make
BuildRequires: ostree-devel
%if 0%{?fedora} <= 35
Requires: containers-common >= 4:1-39
%else
Requires: containers-common >= 4:1-46
%endif

%description
Command line utility to inspect images and repositories directly on Docker
registries without the need to pull them.

%package tests
Summary: Tests for %{name}
Requires: %{name} = %{epoch}:%{version}-%{release}
Requires: bats
Requires: gnupg
Requires: jq
Requires: podman
Requires: httpd-tools
Requires: openssl
Requires: fakeroot
Requires: squashfs-tools

%description tests
%{summary}

This package contains system tests for %{name}

%prep
{{{ git_dir_setup_macro }}}

sed -i 's/install-binary: bin\/skopeo/install-binary:/' Makefile

# This will invoke `make` command in the directory with the extracted sources.
%build
%set_build_flags
export CGO_CFLAGS=$CFLAGS
# These extra flags present in $CFLAGS have been skipped for now as they break the build
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-flto=auto//g')
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-Wp,D_GLIBCXX_ASSERTIONS//g')
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-specs=\/usr\/lib\/rpm\/redhat\/redhat-annobin-cc1//g')

%ifarch x86_64
export CGO_CFLAGS+=" -m64 -mtune=generic -fcf-protection=full"
%endif

LDFLAGS=""

export BUILDTAGS="$(hack/libdm_tag.sh)"
%if 0%{?rhel}
export BUILDTAGS="$BUILDTAGS exclude_graphdriver_btrfs btrfs_noversion"
%endif

%gobuild -o bin/%{name} ./cmd/%{name}

%install
%{__make} PREFIX=%{buildroot}%{_prefix} install-binary install-docs install-completions

# system tests
install -d -p %{buildroot}/%{_datadir}/%{name}/test/system
cp -pav systemtest/* %{buildroot}/%{_datadir}/%{name}/test/system/

%files
%license LICENSE
%doc README.md
%{_bindir}/%{name}
%{_mandir}/man1/%%{name}*
%dir %{_datadir}/bash-completion
%dir %{_datadir}/bash-completion/completions
%{_datadir}/bash-completion/completions/%{name}
%dir %{_datadir}/fish
%dir %{_datadir}/fish/vendor_completions.d
%{_datadir}/fish/vendor_completions.d/%{name}.fish
%dir %{_datadir}/zsh
%dir %{_datadir}/zsh/site-functions
%{_datadir}/zsh/site-functions/_%{name}

%files tests
%license LICENSE
%{_datadir}/%{name}/test

%changelog
{{{ git_dir_changelog }}}
@@ -32,8 +32,7 @@ load helpers
config_digest=$(jq -r '.config.digest' <<<"$inspect_local_raw")

# Each SHA-named layer file (but not the config) must be listed in the output of 'inspect'.
# In all existing versions of Skopeo (with 1.6 being the current as of this comment),
# the output of 'inspect' lists layer digests,
# As of Skopeo 1.6, (skopeo inspect)'s output lists layer digests,
# but not the digest of the config blob ($config_digest), if any.
layers=$(jq -r '.Layers' <<<"$inspect_local")
for sha in $(find $workdir -type f | xargs -l1 basename | egrep '^[0-9a-f]{64}$'); do

@@ -50,7 +50,7 @@ function setup() {

local dir=$TESTDIR/dir

run_skopeo copy --dest-compress-format=zstd $remote_image oci:$dir:latest
run_skopeo copy --dest-compress --dest-compress-format=zstd $remote_image oci:$dir:latest

# zstd magic number
local magic=$(printf "\x28\xb5\x2f\xfd")
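The test builds those four bytes with printf and compares them against the start of the compressed blob. A minimal sketch of the same check in Go, assuming you pass the path of a blob from the OCI layout (the exact blob path is hypothetical):

```go
package main

import (
	"bytes"
	"fmt"
	"os"
)

// zstd frames begin with the magic number 0x28 0xB5 0x2F 0xFD
// (little-endian 0xFD2FB528), the same bytes the bats test prints.
var zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}

func main() {
	blob, err := os.ReadFile(os.Args[1]) // e.g. <dir>/blobs/sha256/<digest>
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.HasPrefix(blob, zstdMagic))
}
```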
@@ -1,28 +0,0 @@
#!/usr/bin/env bats
#
# list-tags tests
#

load helpers

# list from registry
@test "list-tags: remote repository on a registry" {
local remote_image=quay.io/libpod/alpine_labels

run_skopeo list-tags "docker://${remote_image}"
expect_output --substring "quay.io/libpod/alpine_labels"
expect_output --substring "latest"
}

# list from a local docker-archive file
@test "list-tags: from a docker-archive file" {
local file_name=${TEST_SOURCE_DIR}/testdata/docker-two-images.tar.xz

run_skopeo list-tags docker-archive:$file_name
expect_output --substring "example.com/empty:latest"
expect_output --substring "example.com/empty/but:different"

}


# vim: filetype=sh
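For context on what the deleted test was grepping: `skopeo list-tags` prints a JSON document naming the repository and its tags. A hedged sketch of consuming that output from Go; the field names are an assumption based on skopeo's documented output format, not taken from this diff:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// tagList mirrors the assumed shape of skopeo list-tags output:
// {"Repository": "...", "Tags": ["latest", ...]}
type tagList struct {
	Repository string   `json:"Repository"`
	Tags       []string `json:"Tags"`
}

func main() {
	out, err := exec.Command("skopeo", "list-tags",
		"docker://quay.io/libpod/alpine_labels").Output()
	if err != nil {
		panic(err)
	}
	var tl tagList
	if err := json.Unmarshal(out, &tl); err != nil {
		panic(err)
	}
	fmt.Println(tl.Repository, tl.Tags)
}
```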
@@ -1,26 +0,0 @@
#!/usr/bin/env bats
#
# Sync tests
#

load helpers

function setup() {
standard_setup
}

@test "sync: --dry-run" {
local remote_image=quay.io/libpod/busybox:latest
local dir=$TESTDIR/dir

run_skopeo sync --dry-run --src docker --dest dir --scoped $remote_image $dir
expect_output --substring "Would have copied image"
expect_output --substring "from=\"docker://${remote_image}\" to=\"dir:${dir}/${remote_image}\""
expect_output --substring "Would have synced 1 images from 1 sources"
}

teardown() {
standard_teardown
}

# vim: filetype=sh
BIN
systemtest/testdata/docker-two-images.tar.xz
vendored
Binary file not shown.
2
vendor/github.com/BurntSushi/toml/.gitignore
generated
vendored
@@ -1,2 +1,2 @@
/toml.test
toml.test
/toml-test
1
vendor/github.com/BurntSushi/toml/COMPATIBLE
generated
vendored
Normal file
@@ -0,0 +1 @@
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
191
vendor/github.com/BurntSushi/toml/README.md
generated
vendored
@@ -1,5 +1,6 @@
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml` packages.
reflection interface similar to Go's standard library `json` and `xml`
packages.

Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).

@@ -9,7 +10,7 @@ See the [releases page](https://github.com/BurntSushi/toml/releases) for a
changelog; this information is also in the git tag annotations (e.g. `git show
v0.4.0`).

This library requires Go 1.13 or newer; add it to your go.mod with:
This library requires Go 1.13 or newer; install it with:

% go get github.com/BurntSushi/toml@latest

@@ -18,7 +19,16 @@ It also comes with a TOML validator CLI tool:
% go install github.com/BurntSushi/toml/cmd/tomlv@latest
% tomlv some-toml-file.toml

### Testing
This package passes all tests in [toml-test] for both the decoder and the
encoder.

[toml-test]: https://github.com/BurntSushi/toml-test

### Examples
This package works similar to how the Go standard library handles XML and JSON.
Namely, data is loaded into Go values via reflection.

For the simplest example, consider some TOML file as just a list of keys and
values:

@@ -30,7 +40,7 @@ Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```

Which can be decoded with:
Which could be defined in Go as:

```go
type Config struct {
@@ -38,15 +48,20 @@ type Config struct {
Cats []string
Pi float64
Perfection []int
DOB time.Time
DOB time.Time // requires `import time`
}

var conf Config
_, err := toml.Decode(tomlData, &conf)
```

You can also use struct tags if your struct field name doesn't map to a TOML key
value directly:
And then decoded with:

```go
var conf Config
err := toml.Decode(tomlData, &conf)
// handle error
```

You can also use struct tags if your struct field name doesn't map to a TOML
key value directly:

```toml
some_key_NAME = "wat"
@@ -58,63 +73,139 @@ type TOML struct {
}
```

Beware that like other decoders **only exported fields** are considered when
encoding and decoding; private fields are silently ignored.
Beware that like other most other decoders **only exported fields** are
considered when encoding and decoding; private fields are silently ignored.

### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
Here's an example that automatically parses values in a `mail.Address`:
Here's an example that automatically parses duration strings into
`time.Duration` values:

```toml
contacts = [
"Donald Duck <donald@duckburg.com>",
"Scrooge McDuck <scrooge@duckburg.com>",
]
[[song]]
name = "Thunder Road"
duration = "4m49s"

[[song]]
name = "Stairway to Heaven"
duration = "8m03s"
```

Can be decoded with:
Which can be decoded with:

```go
// Create address type which satisfies the encoding.TextUnmarshaler interface.
type address struct {
*mail.Address
type song struct {
Name string
Duration duration
}
type songs struct {
Song []song
}
var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
log.Fatal(err)
}

func (a *address) UnmarshalText(text []byte) error {
for _, s := range favorites.Song {
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```

And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface:

```go
type duration struct {
time.Duration
}

func (d *duration) UnmarshalText(text []byte) error {
var err error
a.Address, err = mail.ParseAddress(string(text))
d.Duration, err = time.ParseDuration(string(text))
return err
}

// Decode it.
func decode() {
blob := `
contacts = [
"Donald Duck <donald@duckburg.com>",
"Scrooge McDuck <scrooge@duckburg.com>",
]
`

var contacts struct {
Contacts []address
}

_, err := toml.Decode(blob, &contacts)
if err != nil {
log.Fatal(err)
}

for _, c := range contacts.Contacts {
fmt.Printf("%#v\n", c.Address)
}

// Output:
// &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"}
// &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"}
}
```

To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
a similar way.

### More complex usage
See the [`_example/`](/_example) directory for a more complex example.
Here's an example of how to load the example from the official spec page:

```toml
# This is a TOML document. Boom.

title = "TOML Example"

[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?

[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true

[servers]

# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"

[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"

[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it

# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]
```

And the corresponding Go types are:

```go
type tomlConfig struct {
Title string
Owner ownerInfo
DB database `toml:"database"`
Servers map[string]server
Clients clients
}

type ownerInfo struct {
Name string
Org string `toml:"organization"`
Bio string
DOB time.Time
}

type database struct {
Server string
Ports []int
ConnMax int `toml:"connection_max"`
Enabled bool
}

type server struct {
IP string
DC string
}

type clients struct {
Data [][]interface{}
Hosts []string
}
```

Note that a case insensitive match will be tried if an exact match can't be
found.

A working example of the above can be found in `_example/example.{go,toml}`.
302
vendor/github.com/BurntSushi/toml/decode.go
generated
vendored
@@ -1,18 +1,14 @@
package toml

import (
"bytes"
"encoding"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"reflect"
"strconv"
"strings"
"time"
)

// Unmarshaler is the interface implemented by objects that can unmarshal a
@@ -21,30 +17,12 @@ type Unmarshaler interface {
UnmarshalTOML(interface{}) error
}

// Unmarshal decodes the contents of `data` in TOML format into a pointer `v`.
func Unmarshal(data []byte, v interface{}) error {
_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
func Unmarshal(p []byte, v interface{}) error {
_, err := Decode(string(p), v)
return err
}

// Decode the TOML data in to the pointer v.
//
// See the documentation on Decoder for a description of the decoding process.
func Decode(data string, v interface{}) (MetaData, error) {
return NewDecoder(strings.NewReader(data)).Decode(v)
}

// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at path and decode it for you.
func DecodeFile(path string, v interface{}) (MetaData, error) {
fp, err := os.Open(path)
if err != nil {
return MetaData{}, err
}
defer fp.Close()
return NewDecoder(fp).Decode(v)
}

// Primitive is a TOML value that hasn't been decoded into a Go value.
//
// This type can be used for any value, which will cause decoding to be delayed.
@@ -64,10 +42,27 @@ type Primitive struct {
// The significand precision for float32 and float64 is 24 and 53 bits; this is
// the range a natural number can be stored in a float without loss of data.
const (
maxSafeFloat32Int = 16777215 // 2^24-1
maxSafeFloat64Int = int64(9007199254740991) // 2^53-1
maxSafeFloat32Int = 16777215 // 2^24-1
maxSafeFloat64Int = 9007199254740991 // 2^53-1
)

// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
md.context = primValue.context
defer func() { md.context = nil }()
return md.unify(primValue.undecoded, rvalue(v))
}

// Decoder decodes TOML data.
//
// TOML tables correspond to Go structs or maps (dealer's choice – they can be
@@ -78,9 +73,6 @@ const (
// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
// in the local timezone.
//
// time.Duration types are treated as nanoseconds if the TOML value is an
// integer, or they're parsed with time.ParseDuration() if they're strings.
//
// All other TOML types (float, string, int, bool and array) correspond to the
// obvious Go types.
//
@@ -88,7 +80,7 @@ const (
// interface, in which case any primitive TOML value (floats, strings, integers,
// booleans, datetimes) will be converted to a []byte and given to the value's
// UnmarshalText method. See the Unmarshaler example for a demonstration with
// email addresses.
// time duration strings.
//
// Key mapping
//
@@ -117,7 +109,6 @@ func NewDecoder(r io.Reader) *Decoder {
var (
unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem()
)

// Decode TOML data in to the pointer `v`.
@@ -129,10 +120,10 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
s = "%v"
}

return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v))
return MetaData{}, e("cannot decode to non-pointer "+s, reflect.TypeOf(v))
}
if rv.IsNil() {
return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v))
return MetaData{}, e("cannot decode to nil value of %q", reflect.TypeOf(v))
}

// Check if this is a supported type: struct, map, interface{}, or something
@@ -142,7 +133,7 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
!(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) &&
!rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) {
return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt)
return MetaData{}, e("cannot decode to type %s", rt)
}

// TODO: parser should read from io.Reader? Or at the very least, make it
@@ -159,30 +150,30 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {

md := MetaData{
mapping: p.mapping,
keyInfo: p.keyInfo,
types: p.types,
keys: p.ordered,
decoded: make(map[string]struct{}, len(p.ordered)),
context: nil,
data: data,
}
return md, md.unify(p.mapping, rv)
}

// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
// Decode the TOML data in to the pointer v.
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
md.context = primValue.context
defer func() { md.context = nil }()
return md.unify(primValue.undecoded, rvalue(v))
// See the documentation on Decoder for a description of the decoding process.
func Decode(data string, v interface{}) (MetaData, error) {
return NewDecoder(strings.NewReader(data)).Decode(v)
}

// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at path and decode it for you.
func DecodeFile(path string, v interface{}) (MetaData, error) {
fp, err := os.Open(path)
if err != nil {
return MetaData{}, err
}
defer fp.Close()
return NewDecoder(fp).Decode(v)
}

// unify performs a sort of type unification based on the structure of `rv`,
@@ -193,7 +184,7 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
// Special case. Look for a `Primitive` value.
// TODO: #76 would make this superfluous after implemented.
if rv.Type() == primitiveType {
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
// Save the undecoded data and the key context into the primitive
// value.
context := make(Key, len(md.context))
@@ -205,14 +196,17 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
return nil
}

rvi := rv.Interface()
if v, ok := rvi.(Unmarshaler); ok {
return v.UnmarshalTOML(data)
}
if v, ok := rvi.(encoding.TextUnmarshaler); ok {
return md.unifyText(data, v)
// Special case. Unmarshaler Interface support.
if rv.CanAddr() {
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
return v.UnmarshalTOML(data)
}
}

// Special case. Look for a value satisfying the TextUnmarshaler interface.
if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
return md.unifyText(data, v)
}
// TODO:
// The behavior here is incorrect whenever a Go type satisfies the
// encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
@@ -223,6 +217,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {

k := rv.Kind()

// laziness
if k >= reflect.Int && k <= reflect.Uint64 {
return md.unifyInt(data, rv)
}
@@ -248,14 +243,15 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
case reflect.Bool:
return md.unifyBool(data, rv)
case reflect.Interface:
if rv.NumMethod() > 0 { // Only support empty interfaces are supported.
return md.e("unsupported type %s", rv.Type())
// we only support empty interfaces.
if rv.NumMethod() > 0 {
return e("unsupported type %s", rv.Type())
}
return md.unifyAnything(data, rv)
case reflect.Float32, reflect.Float64:
return md.unifyFloat64(data, rv)
}
return md.e("unsupported type %s", rv.Kind())
return e("unsupported type %s", rv.Kind())
}

func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
@@ -264,7 +260,7 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
if mapping == nil {
return nil
}
return md.e("type mismatch for %s: expected table but found %T",
return e("type mismatch for %s: expected table but found %T",
rv.Type().String(), mapping)
}

@@ -290,14 +286,13 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
if isUnifiable(subv) {
md.decoded[md.context.add(key).String()] = struct{}{}
md.context = append(md.context, key)

err := md.unify(datum, subv)
if err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
} else if f.name != "" {
return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name)
return e("cannot write unexported field %s.%s", rv.Type().String(), f.name)
}
}
}
@@ -305,10 +300,10 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
}

func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
keyType := rv.Type().Key().Kind()
if keyType != reflect.String && keyType != reflect.Interface {
return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)",
keyType, rv.Type())
if k := rv.Type().Key().Kind(); k != reflect.String {
return fmt.Errorf(
"toml: cannot decode to a map with non-string key type (%s in %q)",
k, rv.Type())
}

tmap, ok := mapping.(map[string]interface{})
@@ -326,22 +321,13 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
md.context = append(md.context, k)

rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))

err := md.unify(v, indirect(rvval))
if err != nil {
if err := md.unify(v, rvval); err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]

rvkey := indirect(reflect.New(rv.Type().Key()))

switch keyType {
case reflect.Interface:
rvkey.Set(reflect.ValueOf(k))
case reflect.String:
rvkey.SetString(k)
}

rvkey.SetString(k)
rv.SetMapIndex(rvkey, rvval)
}
return nil
@@ -356,7 +342,7 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
return md.badtype("slice", data)
}
if l := datav.Len(); l != rv.Len() {
return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l)
return e("expected array length %d; got TOML array of length %d", rv.Len(), l)
}
return md.unifySliceArray(datav, rv)
}
@@ -389,18 +375,6 @@ func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
}

func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
_, ok := rv.Interface().(json.Number)
if ok {
if i, ok := data.(int64); ok {
rv.SetString(strconv.FormatInt(i, 10))
} else if f, ok := data.(float64); ok {
rv.SetString(strconv.FormatFloat(f, 'f', -1, 64))
} else {
return md.badtype("string", data)
}
return nil
}

if s, ok := data.(string); ok {
rv.SetString(s)
return nil
@@ -409,13 +383,11 @@ func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
}

func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
rvk := rv.Kind()

if num, ok := data.(float64); ok {
switch rvk {
switch rv.Kind() {
case reflect.Float32:
if num < -math.MaxFloat32 || num > math.MaxFloat32 {
return md.parseErr(errParseRange{i: num, size: rvk.String()})
return e("value %f is out of range for float32", num)
}
fallthrough
case reflect.Float64:
@@ -427,11 +399,20 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
}

if num, ok := data.(int64); ok {
if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) ||
(rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) {
return md.parseErr(errParseRange{i: num, size: rvk.String()})
switch rv.Kind() {
case reflect.Float32:
if num < -maxSafeFloat32Int || num > maxSafeFloat32Int {
return e("value %d is out of range for float32", num)
}
fallthrough
case reflect.Float64:
if num < -maxSafeFloat64Int || num > maxSafeFloat64Int {
return e("value %d is out of range for float64", num)
}
rv.SetFloat(float64(num))
default:
panic("bug")
}
rv.SetFloat(float64(num))
return nil
}

@@ -439,46 +420,50 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
}

func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
_, ok := rv.Interface().(time.Duration)
if ok {
// Parse as string duration, and fall back to regular integer parsing
// (as nanosecond) if this is not a string.
if s, ok := data.(string); ok {
dur, err := time.ParseDuration(s)
if err != nil {
return md.parseErr(errParseDuration{s})
if num, ok := data.(int64); ok {
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
switch rv.Kind() {
case reflect.Int, reflect.Int64:
// No bounds checking necessary.
case reflect.Int8:
if num < math.MinInt8 || num > math.MaxInt8 {
return e("value %d is out of range for int8", num)
}
case reflect.Int16:
if num < math.MinInt16 || num > math.MaxInt16 {
return e("value %d is out of range for int16", num)
}
case reflect.Int32:
if num < math.MinInt32 || num > math.MaxInt32 {
return e("value %d is out of range for int32", num)
}
}
rv.SetInt(int64(dur))
return nil
rv.SetInt(num)
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
unum := uint64(num)
switch rv.Kind() {
case reflect.Uint, reflect.Uint64:
// No bounds checking necessary.
case reflect.Uint8:
if num < 0 || unum > math.MaxUint8 {
return e("value %d is out of range for uint8", num)
}
case reflect.Uint16:
if num < 0 || unum > math.MaxUint16 {
return e("value %d is out of range for uint16", num)
}
case reflect.Uint32:
if num < 0 || unum > math.MaxUint32 {
return e("value %d is out of range for uint32", num)
}
}
rv.SetUint(unum)
} else {
panic("unreachable")
}
return nil
}

num, ok := data.(int64)
if !ok {
return md.badtype("integer", data)
}

rvk := rv.Kind()
switch {
case rvk >= reflect.Int && rvk <= reflect.Int64:
if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) ||
(rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) ||
(rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) {
return md.parseErr(errParseRange{i: num, size: rvk.String()})
}
rv.SetInt(num)
case rvk >= reflect.Uint && rvk <= reflect.Uint64:
unum := uint64(num)
if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) ||
rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) ||
rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) {
return md.parseErr(errParseRange{i: num, size: rvk.String()})
}
rv.SetUint(unum)
default:
panic("unreachable")
}
return nil
return md.badtype("integer", data)
}

func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
@@ -503,7 +488,7 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
return err
}
s = string(text)
case encoding.TextMarshaler:
case TextMarshaler:
text, err := sdata.MarshalText()
if err != nil {
return err
@@ -529,30 +514,7 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
}

func (md *MetaData) badtype(dst string, data interface{}) error {
return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst)
}

func (md *MetaData) parseErr(err error) error {
k := md.context.String()
return ParseError{
LastKey: k,
Position: md.keyInfo[k].pos,
Line: md.keyInfo[k].pos.Line,
err: err,
input: string(md.data),
}
}

func (md *MetaData) e(format string, args ...interface{}) error {
f := "toml: "
if len(md.context) > 0 {
f = fmt.Sprintf("toml: (last key %q): ", md.context)
p := md.keyInfo[md.context.String()].pos
if p.Line > 0 {
f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context)
}
}
return fmt.Errorf(f+format, args...)
return e("incompatible types: TOML key %q has type %T; destination has type %s", md.context, data, dst)
}

// rvalue returns a reflect.Value of `v`. All pointers are resolved.
@@ -571,11 +533,7 @@ func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
if v.CanSet() {
pv := v.Addr()
pvi := pv.Interface()
if _, ok := pvi.(encoding.TextUnmarshaler); ok {
return pv
}
if _, ok := pvi.(Unmarshaler); ok {
if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok {
return pv
}
}
@@ -591,12 +549,12 @@ func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() {
return true
}
rvi := rv.Interface()
if _, ok := rvi.(encoding.TextUnmarshaler); ok {
return true
}
if _, ok := rvi.(Unmarshaler); ok {
if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
return true
}
return false
}

func e(format string, args ...interface{}) error {
return fmt.Errorf("toml: "+format, args...)
}
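Both sides of this decode.go diff bounds-check integers in unifyInt rather than silently truncating; only the error plumbing differs (parseErr/errParseRange versus the plain e() helper). A minimal sketch of the observable behaviour, assuming the BurntSushi/toml module; the exact error wording varies between the two versions:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var cfg struct{ Port int8 }
	// 300 does not fit in int8, so Decode returns a range error
	// instead of wrapping the value around.
	_, err := toml.Decode(`Port = 300`, &cfg)
	fmt.Println(err) // e.g. "toml: ... value 300 is out of range for int8"
}
```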
216
vendor/github.com/BurntSushi/toml/encode.go
generated
vendored
@@ -3,7 +3,6 @@ package toml
import (
"bufio"
"encoding"
"encoding/json"
"errors"
"fmt"
"io"
@@ -64,12 +63,6 @@ var dblQuotedReplacer = strings.NewReplacer(
"\x7f", `\u007f`,
)

var (
marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem()
marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
)

// Marshaler is the interface implemented by types that can marshal themselves
// into valid TOML.
type Marshaler interface {
@@ -81,9 +74,6 @@ type Marshaler interface {
// The mapping between Go values and TOML values should be precisely the same as
// for the Decode* functions.
//
// time.Time is encoded as a RFC 3339 string, and time.Duration as its string
// representation.
//
// The toml.Marshaler and encoder.TextMarshaler interfaces are supported to
// encoding the value as custom TOML.
//
@@ -95,17 +85,6 @@ type Marshaler interface {
//
// Go maps will be sorted alphabetically by key for deterministic output.
//
// The toml struct tag can be used to provide the key name; if omitted the
// struct field name will be used. If the "omitempty" option is present the
// following value will be skipped:
//
// - arrays, slices, maps, and string with len of 0
// - struct with all zero values
// - bool false
//
// If omitzero is given all int and float types with a value of 0 will be
// skipped.
//
// Encoding Go values without a corresponding TOML representation will return an
// error. Examples of this includes maps with non-string keys, slices with nil
// elements, embedded non-struct types, and nested slices containing maps or
@@ -157,15 +136,18 @@ func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
}

func (enc *Encoder) encode(key Key, rv reflect.Value) {
// If we can marshal the type to text, then we use that. This prevents the
// encoder for handling these types as generic structs (or whatever the
// underlying type of a TextMarshaler is).
switch {
case isMarshaler(rv):
// Special case: time needs to be in ISO8601 format.
//
// Special case: if we can marshal the type to text, then we used that. This
// prevents the encoder for handling these types as generic structs (or
// whatever the underlying type of a TextMarshaler is).
switch t := rv.Interface().(type) {
case time.Time, encoding.TextMarshaler, Marshaler:
enc.writeKeyValue(key, rv, false)
return
case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented.
enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded))
// TODO: #76 would make this superfluous after implemented.
case Primitive:
enc.encode(key, reflect.ValueOf(t.undecoded))
return
}

@@ -230,44 +212,18 @@ func (enc *Encoder) eElement(rv reflect.Value) {
if err != nil {
encPanic(err)
}
if s == nil {
encPanic(errors.New("MarshalTOML returned nil and no error"))
}
enc.w.Write(s)
enc.writeQuoted(string(s))
return
case encoding.TextMarshaler:
s, err := v.MarshalText()
if err != nil {
encPanic(err)
}
if s == nil {
encPanic(errors.New("MarshalText returned nil and no error"))
}
enc.writeQuoted(string(s))
return
case time.Duration:
enc.writeQuoted(v.String())
return
case json.Number:
n, _ := rv.Interface().(json.Number)

if n == "" { /// Useful zero value.
enc.w.WriteByte('0')
return
} else if v, err := n.Int64(); err == nil {
enc.eElement(reflect.ValueOf(v))
return
} else if v, err := n.Float64(); err == nil {
enc.eElement(reflect.ValueOf(v))
return
}
encPanic(errors.New(fmt.Sprintf("Unable to convert \"%s\" to neither int64 nor float64", n)))
}

switch rv.Kind() {
case reflect.Ptr:
enc.eElement(rv.Elem())
return
case reflect.String:
enc.writeQuoted(rv.String())
case reflect.Bool:
@@ -303,7 +259,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
case reflect.Interface:
enc.eElement(rv.Elem())
default:
encPanic(fmt.Errorf("unexpected type: %T", rv.Interface()))
encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface()))
}
}

@@ -324,7 +280,7 @@ func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
length := rv.Len()
enc.wf("[")
for i := 0; i < length; i++ {
elem := eindirect(rv.Index(i))
elem := rv.Index(i)
enc.eElement(elem)
if i != length-1 {
enc.wf(", ")
@@ -338,7 +294,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
encPanic(errNoKey)
}
for i := 0; i < rv.Len(); i++ {
trv := eindirect(rv.Index(i))
trv := rv.Index(i)
if isNil(trv) {
continue
}
@@ -363,7 +319,7 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) {
}

func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) {
switch rv.Kind() {
switch rv := eindirect(rv); rv.Kind() {
case reflect.Map:
enc.eMap(key, rv, inline)
case reflect.Struct:
@@ -385,7 +341,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
var mapKeysDirect, mapKeysSub []string
for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
if typeIsTable(tomlTypeOfGo(rv.MapIndex(mapKey))) {
mapKeysSub = append(mapKeysSub, k)
} else {
mapKeysDirect = append(mapKeysDirect, k)
@@ -395,7 +351,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
var writeMapKeys = func(mapKeys []string, trailC bool) {
sort.Strings(mapKeys)
for i, mapKey := range mapKeys {
val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
val := rv.MapIndex(reflect.ValueOf(mapKey))
if isNil(val) {
continue
}
@@ -423,13 +379,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {

const is32Bit = (32 << (^uint(0) >> 63)) == 32

func pointerTo(t reflect.Type) reflect.Type {
if t.Kind() == reflect.Ptr {
return pointerTo(t.Elem())
}
return t
}

func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
// Write keys for fields directly under this key first, because if we write
// a field that creates a new table then all keys under it will be in that
@@ -446,25 +395,31 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i)
isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct
if f.PkgPath != "" && !isEmbed { /// Skip unexported fields.
continue
}
opts := getOptions(f.Tag)
if opts.skip {
if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields.
continue
}

frv := eindirect(rv.Field(i))
frv := rv.Field(i)

// Treat anonymous struct fields with tag names as though they are
// not anonymous, like encoding/json does.
//
// Non-struct anonymous fields use the normal encoding logic.
if isEmbed {
if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct {
addFields(frv.Type(), frv, append(start, f.Index...))
continue
if f.Anonymous {
t := f.Type
switch t.Kind() {
case reflect.Struct:
if getOptions(f.Tag).name == "" {
addFields(t, frv, append(start, f.Index...))
continue
}
case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" {
if !frv.IsNil() {
addFields(t.Elem(), frv.Elem(), append(start, f.Index...))
}
continue
}
}
}

@@ -490,7 +445,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
writeFields := func(fields [][]int) {
for _, fieldIndex := range fields {
fieldType := rt.FieldByIndex(fieldIndex)
fieldVal := eindirect(rv.FieldByIndex(fieldIndex))
fieldVal := rv.FieldByIndex(fieldIndex)

if isNil(fieldVal) { /// Don't write anything for nil fields.
continue
@@ -543,21 +498,6 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() {
return nil
}

if rv.Kind() == reflect.Struct {
if rv.Type() == timeType {
return tomlDatetime
}
if isMarshaler(rv) {
return tomlString
}
return tomlHash
}

if isMarshaler(rv) {
return tomlString
}

switch rv.Kind() {
case reflect.Bool:
return tomlBool
@@ -569,7 +509,7 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
case reflect.Float32, reflect.Float64:
return tomlFloat
case reflect.Array, reflect.Slice:
if isTableArray(rv) {
if typeEqual(tomlHash, tomlArrayType(rv)) {
return tomlArrayHash
}
return tomlArray
@@ -579,35 +519,67 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
return tomlString
case reflect.Map:
return tomlHash
case reflect.Struct:
if _, ok := rv.Interface().(time.Time); ok {
return tomlDatetime
}
if isMarshaler(rv) {
return tomlString
}
return tomlHash
default:
if isMarshaler(rv) {
return tomlString
}

encPanic(errors.New("unsupported type: " + rv.Kind().String()))
panic("unreachable")
}
}

func isMarshaler(rv reflect.Value) bool {
return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml)
switch rv.Interface().(type) {
case encoding.TextMarshaler:
return true
case Marshaler:
return true
}

// Someone used a pointer receiver: we can make it work for pointer values.
if rv.CanAddr() {
if _, ok := rv.Addr().Interface().(encoding.TextMarshaler); ok {
return true
}
if _, ok := rv.Addr().Interface().(Marshaler); ok {
return true
}
}
return false
}

// isTableArray reports if all entries in the array or slice are a table.
func isTableArray(arr reflect.Value) bool {
if isNil(arr) || !arr.IsValid() || arr.Len() == 0 {
return false
// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
// slize). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
return nil
}

ret := true
for i := 0; i < arr.Len(); i++ {
tt := tomlTypeOfGo(eindirect(arr.Index(i)))
// Don't allow nil.
if tt == nil {
/// Don't allow nil.
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
if tomlTypeOfGo(rv.Index(i)) == nil {
encPanic(errArrayNilElement)
}

if ret && !typeEqual(tomlHash, tt) {
ret = false
}
}
return ret

firstType := tomlTypeOfGo(rv.Index(0))
if firstType == nil {
encPanic(errArrayNilElement)
}
return firstType
}

type tagOptions struct {
@@ -652,8 +624,6 @@ func isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
return rv.Len() == 0
case reflect.Struct:
return reflect.Zero(rv.Type()).Interface() == rv.Interface()
case reflect.Bool:
return !rv.Bool()
}
@@ -705,25 +675,13 @@ func encPanic(err error) {
panic(tomlEncodeError{err})
}

// Resolve any level of pointers to the actual value (e.g. **string → string).
func eindirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
if isMarshaler(v) {
return v
}
if v.CanAddr() { /// Special case for marshalers; see #358.
if pv := v.Addr(); isMarshaler(pv) {
return pv
}
}
switch v.Kind() {
case reflect.Ptr, reflect.Interface:
return eindirect(v.Elem())
default:
return v
}

if v.IsNil() {
return v
}

return eindirect(v.Elem())
}

func isNil(rv reflect.Value) bool {
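The isMarshaler path that both versions of encode.go take means any type with a MarshalText method is written as a quoted TOML string instead of a table. A small sketch under that assumption, using net.IP from the standard library (which implements encoding.TextMarshaler):

```go
package main

import (
	"net"
	"os"

	"github.com/BurntSushi/toml"
)

func main() {
	// net.IP implements encoding.TextMarshaler, so the encoder emits
	// its text form as a quoted string rather than a struct/table.
	cfg := struct{ Addr net.IP }{Addr: net.IPv4(10, 0, 0, 1)}
	if err := toml.NewEncoder(os.Stdout).Encode(cfg); err != nil {
		panic(err)
	}
	// Output: Addr = "10.0.0.1"
}
```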
55
vendor/github.com/BurntSushi/toml/error.go
generated
vendored
@@ -10,7 +10,7 @@ import (
// For example invalid syntax, duplicate keys, etc.
//
// In addition to the error message itself, you can also print detailed location
// information with context by using ErrorWithPosition():
// information with context by using ErrorWithLocation():
//
// toml: error: Key 'fruit' was already created and cannot be used as an array.
//
@@ -128,13 +128,9 @@ func (pe ParseError) ErrorWithPosition() string {
func (pe ParseError) ErrorWithUsage() string {
m := pe.ErrorWithPosition()
if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {
lines := strings.Split(strings.TrimSpace(u.Usage()), "\n")
for i := range lines {
if lines[i] != "" {
lines[i] = " " + lines[i]
}
}
return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n"
return m + "Error help:\n\n " +
strings.ReplaceAll(strings.TrimSpace(u.Usage()), "\n", "\n ") +
"\n"
}
return m
}
@@ -164,11 +160,6 @@ type (
errLexInvalidDate struct{ v string }
errLexInlineTableNL struct{}
errLexStringNL struct{}
errParseRange struct {
i interface{} // int or float
size string // "int64", "uint16", etc.
}
errParseDuration struct{ d string }
)

func (e errLexControl) Error() string {
@@ -188,10 +179,6 @@ func (e errLexInlineTableNL) Error() string { return "newlines not allowed withi
func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
func (e errLexStringNL) Error() string { return "strings cannot contain newlines" }
func (e errLexStringNL) Usage() string { return usageStringNewline }
func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) }
func (e errParseRange) Usage() string { return usageIntOverflow }
func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
func (e errParseDuration) Usage() string { return usageDuration }

const usageEscape = `
A '\' inside a "-delimited string is interpreted as an escape character.
@@ -240,37 +227,3 @@ Instead use """ or ''' to split strings over multiple lines:
string = """Hello,
world!"""
`

const usageIntOverflow = `
This number is too large; this may be an error in the TOML, but it can also be a
bug in the program that uses too small of an integer.

The maximum and minimum values are:

size │ lowest │ highest
───────┼────────────────┼──────────
int8 │ -128 │ 127
int16 │ -32,768 │ 32,767
int32 │ -2,147,483,648 │ 2,147,483,647
int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷
uint8 │ 0 │ 255
uint16 │ 0 │ 65535
uint32 │ 0 │ 4294967295
uint64 │ 0 │ 1.8 × 10¹⁸

int refers to int32 on 32-bit systems and int64 on 64-bit systems.
`

const usageDuration = `
A duration must be as "number<unit>", without any spaces. Valid units are:

ns nanoseconds (billionth of a second)
us, µs microseconds (millionth of a second)
ms milliseconds (thousands of a second)
s seconds
m minutes
h hours

You can combine multiple units; for example "5m10s" for 5 minutes and 10
seconds.
`
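ParseError (with ErrorWithPosition and ErrorWithUsage, both visible in this diff) is how callers surface these usage strings. A hedged sketch assuming the newer side of the diff, where range errors are returned as ParseError values; on the older side the same failure is a plain formatted error and errors.As would not match:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ N int8 }
	_, err := toml.Decode("N = 99999", &v)

	// On versions that return ParseError, print the message together with
	// the usage help (e.g. the usageIntOverflow table above).
	var perr toml.ParseError
	if errors.As(err, &perr) {
		fmt.Println(perr.ErrorWithUsage())
	} else if err != nil {
		fmt.Println(err)
	}
}
```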
22
vendor/github.com/BurntSushi/toml/lex.go
generated
vendored
@@ -82,7 +82,7 @@ func (lx *lexer) nextItem() item {
return item
default:
lx.state = lx.state(lx)
//fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack)
//fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack)
}
}
}
@@ -128,11 +128,6 @@ func (lx lexer) getPos() Position {
}

func (lx *lexer) emit(typ itemType) {
// Needed for multiline strings ending with an incomplete UTF-8 sequence.
if lx.start > lx.pos {
lx.error(errLexUTF8{lx.input[lx.pos]})
return
}
lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()}
lx.start = lx.pos
}
@@ -716,17 +711,7 @@ func lexMultilineString(lx *lexer) stateFn {
if lx.peek() == '"' {
/// Check if we already lexed 5 's; if so we have 6 now, and
/// that's just too many man!
///
/// Second check is for the edge case:
///
/// two quotes allowed.
/// vv
/// """lol \""""""
/// ^^ ^^^---- closing three
/// escaped
///
/// But ugly, but it works
if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) {
if strings.HasSuffix(lx.current(), `"""""`) {
return lx.errorf(`unexpected '""""""'`)
}
lx.backup()
@@ -817,7 +802,8 @@ func lexMultilineRawString(lx *lexer) stateFn {
// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
if isNL(lx.next()) { /// \ escaping newline.
// Handle the special case first:
if isNL(lx.next()) {
return lexMultilineString
}
lx.backup()
7
vendor/github.com/BurntSushi/toml/meta.go
generated
vendored
@@ -12,11 +12,10 @@ import (
type MetaData struct {
context Key // Used only during decoding.

keyInfo map[string]keyInfo
mapping map[string]interface{}
types map[string]tomlType
keys []Key
decoded map[string]struct{}
data []byte // Input file; for errors.
}

// IsDefined reports if the key exists in the TOML data.
@@ -51,8 +50,8 @@ func (md *MetaData) IsDefined(key ...string) bool {
// Type will return the empty string if given an empty key or a key that does
// not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
if ki, ok := md.keyInfo[Key(key).String()]; ok {
return ki.tomlType.typeString()
if typ, ok := md.types[Key(key).String()]; ok {
return typ.typeString()
}
return ""
}
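On one side of this meta.go diff Type() reads md.keyInfo, on the other md.types, but the public MetaData behaviour is unchanged. A small sketch of the accessors this file backs, assuming the BurntSushi/toml module:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ Name string }
	md, err := toml.Decode("name = \"skopeo\"\nextra = 1", &v)
	if err != nil {
		panic(err)
	}
	fmt.Println(md.IsDefined("name")) // true
	fmt.Println(md.Type("extra"))     // "Integer"
	fmt.Println(md.Undecoded())       // [extra] — keys not decoded into v
}
```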
62
vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
62
vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
@@ -16,18 +16,12 @@ type parser struct {
	currentKey string   // Base key name for everything except hashes.
	pos        Position // Current position in the TOML file.

	ordered []Key // List of keys in the order that they appear in the TOML data.

	keyInfo   map[string]keyInfo     // Map keyname → info about the TOML key.
	ordered   []Key                  // List of keys in the order that they appear in the TOML data.
	mapping   map[string]interface{} // Map keyname → key value.
	types     map[string]tomlType    // Map keyname → TOML type.
	implicits map[string]struct{}    // Record implicit keys (e.g. "key.group.names").
}

type keyInfo struct {
	pos      Position
	tomlType tomlType
}

func parse(data string) (p *parser, err error) {
	defer func() {
		if r := recover(); r != nil {
@@ -63,8 +57,8 @@ func parse(data string) (p *parser, err error) {
	}

	p = &parser{
		keyInfo:   make(map[string]keyInfo),
		mapping:   make(map[string]interface{}),
		types:     make(map[string]tomlType),
		lx:        lex(data),
		ordered:   make([]Key, 0),
		implicits: make(map[string]struct{}),
@@ -80,15 +74,6 @@ func parse(data string) (p *parser, err error) {
	return p, nil
}

func (p *parser) panicErr(it item, err error) {
	panic(ParseError{
		err:      err,
		Position: it.pos,
		Line:     it.pos.Len,
		LastKey:  p.current(),
	})
}

func (p *parser) panicItemf(it item, format string, v ...interface{}) {
	panic(ParseError{
		Message: fmt.Sprintf(format, v...),
@@ -109,7 +94,7 @@ func (p *parser) panicf(format string, v ...interface{}) {

func (p *parser) next() item {
	it := p.lx.nextItem()
	//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val)
	//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val)
	if it.typ == itemError {
		if it.err != nil {
			panic(ParseError{
@@ -161,7 +146,7 @@ func (p *parser) topLevel(item item) {
		p.assertEqual(itemTableEnd, name.typ)

		p.addContext(key, false)
		p.setType("", tomlHash, item.pos)
		p.setType("", tomlHash)
		p.ordered = append(p.ordered, key)
	case itemArrayTableStart: // [[ .. ]]
		name := p.nextPos()
@@ -173,7 +158,7 @@ func (p *parser) topLevel(item item) {
		p.assertEqual(itemArrayTableEnd, name.typ)

		p.addContext(key, true)
		p.setType("", tomlArrayHash, item.pos)
		p.setType("", tomlArrayHash)
		p.ordered = append(p.ordered, key)
	case itemKeyStart: // key = ..
		outerContext := p.context
@@ -196,9 +181,8 @@ func (p *parser) topLevel(item item) {
		}

		/// Set value.
		vItem := p.next()
		val, typ := p.value(vItem, false)
		p.set(p.currentKey, val, typ, vItem.pos)
		val, typ := p.value(p.next(), false)
		p.set(p.currentKey, val, typ)
		p.ordered = append(p.ordered, p.context.add(p.currentKey))

		/// Remove the context we added (preserving any context from [tbl] lines).
@@ -236,7 +220,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
	case itemString:
		return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
	case itemMultilineString:
		return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
		return p.replaceEscapes(it, stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
	case itemRawString:
		return it.val, p.typeOfPrimitive(it)
	case itemRawMultilineString:
@@ -282,7 +266,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) {
		// So mark the former as a bug but the latter as a legitimate user
		// error.
		if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
			p.panicErr(it, errParseRange{i: it.val, size: "int64"})
			p.panicItemf(it, "Integer '%s' is out of the range of 64-bit signed integers.", it.val)
		} else {
			p.bug("Expected integer value, but got '%s'.", it.val)
		}
@@ -320,7 +304,7 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
	num, err := strconv.ParseFloat(val, 64)
	if err != nil {
		if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
			p.panicErr(it, errParseRange{i: it.val, size: "float64"})
			p.panicItemf(it, "Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val)
		} else {
			p.panicItemf(it, "Invalid float value: %q", it.val)
		}
@@ -359,8 +343,9 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
}

func (p *parser) valueArray(it item) (interface{}, tomlType) {
	p.setType(p.currentKey, tomlArray, it.pos)
	p.setType(p.currentKey, tomlArray)

	// p.setType(p.currentKey, typ)
	var (
		types []tomlType

@@ -429,7 +414,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom

		/// Set the value.
		val, typ := p.value(p.next(), false)
		p.set(p.currentKey, val, typ, it.pos)
		p.set(p.currentKey, val, typ)
		p.ordered = append(p.ordered, p.context.add(p.currentKey))
		hash[p.currentKey] = val

@@ -548,10 +533,9 @@ func (p *parser) addContext(key Key, array bool) {
}

// set calls setValue and setType.
func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) {
func (p *parser) set(key string, val interface{}, typ tomlType) {
	p.setValue(key, val)
	p.setType(key, typ, pos)

	p.setType(key, typ)
}

// setValue sets the given key to the given value in the current context.
@@ -615,7 +599,7 @@ func (p *parser) setValue(key string, value interface{}) {
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType, pos Position) {
func (p *parser) setType(key string, typ tomlType) {
	keyContext := make(Key, 0, len(p.context)+1)
	keyContext = append(keyContext, p.context...)
	if len(key) > 0 { // allow type setting for hashes
@@ -627,7 +611,7 @@ func (p *parser) setType(key string, typ tomlType, pos Position) {
	if len(keyContext) == 0 {
		keyContext = Key{""}
	}
	p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos}
	p.types[keyContext.String()] = typ
}

// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
@@ -635,7 +619,7 @@ func (p *parser) setType(key string, typ tomlType, pos Position) {
func (p *parser) addImplicit(key Key)    { p.implicits[key.String()] = struct{}{} }
func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray }
func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray }
func (p *parser) addImplicitContext(key Key) {
	p.addImplicit(key)
	p.addContext(key, false)
@@ -663,7 +647,7 @@ func stripFirstNewline(s string) string {
}

// Remove newlines inside triple-quoted strings if a line ends with "\".
func (p *parser) stripEscapedNewlines(s string) string {
func stripEscapedNewlines(s string) string {
	split := strings.Split(s, "\n")
	if len(split) < 1 {
		return s
@@ -695,10 +679,6 @@ func (p *parser) stripEscapedNewlines(s string) string {
			continue
		}

		if i == len(split)-1 {
			p.panicf("invalid escape: '\\ '")
		}

		split[i] = line[:len(line)-1] // Remove \
		if len(split)-1 > i {
			split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
@@ -726,8 +706,10 @@ func (p *parser) replaceEscapes(it item, str string) string {
		switch s[r] {
		default:
			p.bug("Expected valid escape code after \\, but got %q.", s[r])
			return ""
		case ' ', '\t':
			p.panicItemf(it, "invalid escape: '\\%c'", s[r])
			return ""
		case 'b':
			replaced = append(replaced, rune(0x0008))
			r += 1
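Note on the parser hunks above: one side of this diff keeps separate `types` and (implicitly) position bookkeeping, while the other consolidates a key's TOML type and source position into a single `keyInfo` struct stored under one map. Below is a minimal standalone sketch of that consolidation pattern, not the vendored code itself; every name in it is illustrative.

package main

import "fmt"

type position struct{ line, col int }

type keyInfo struct {
	typ string   // the value's TOML type
	pos position // where the key was defined, for error messages
}

func main() {
	// One map of structs instead of parallel maps: facts about a key
	// can never drift out of sync with each other.
	info := map[string]keyInfo{}
	info["owner.name"] = keyInfo{typ: "String", pos: position{line: 3, col: 1}}

	// A single lookup now yields both the type and the position.
	if ki, ok := info["owner.name"]; ok {
		fmt.Printf("%s defined at %d:%d\n", ki.typ, ki.pos.line, ki.pos.col)
	}
}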
115
vendor/github.com/Microsoft/go-winio/backuptar/tar.go
generated
vendored
@@ -113,69 +113,6 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta
	return hdr
}

// SecurityDescriptorFromTarHeader reads the SDDL associated with the header of the current file
// from the tar header and returns the security descriptor into a byte slice.
func SecurityDescriptorFromTarHeader(hdr *tar.Header) ([]byte, error) {
	// Maintaining old SDDL-based behavior for backward
	// compatibility. All new tar headers written by this library
	// will have raw binary for the security descriptor.
	var sd []byte
	var err error
	if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
		sd, err = winio.SddlToSecurityDescriptor(sddl)
		if err != nil {
			return nil, err
		}
	}
	if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
		sd, err = base64.StdEncoding.DecodeString(sdraw)
		if err != nil {
			return nil, err
		}
	}
	return sd, nil
}

// ExtendedAttributesFromTarHeader reads the EAs associated with the header of the
// current file from the tar header and returns it as a byte slice.
func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) {
	var eas []winio.ExtendedAttribute
	var eadata []byte
	var err error
	for k, v := range hdr.PAXRecords {
		if !strings.HasPrefix(k, hdrEaPrefix) {
			continue
		}
		data, err := base64.StdEncoding.DecodeString(v)
		if err != nil {
			return nil, err
		}
		eas = append(eas, winio.ExtendedAttribute{
			Name:  k[len(hdrEaPrefix):],
			Value: data,
		})
	}
	if len(eas) != 0 {
		eadata, err = winio.EncodeExtendedAttributes(eas)
		if err != nil {
			return nil, err
		}
	}
	return eadata, nil
}

// EncodeReparsePointFromTarHeader reads the ReparsePoint structure from the tar header
// and encodes it into a byte slice. The file for which this function is called must be a
// symlink.
func EncodeReparsePointFromTarHeader(hdr *tar.Header) []byte {
	_, isMountPoint := hdr.PAXRecords[hdrMountPoint]
	rp := winio.ReparsePoint{
		Target:       filepath.FromSlash(hdr.Linkname),
		IsMountPoint: isMountPoint,
	}
	return winio.EncodeReparsePoint(&rp)
}

// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream.
//
// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
@@ -421,10 +358,21 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
// tar file that was not processed, or io.EOF is there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
	bw := winio.NewBackupStreamWriter(w)

	sd, err := SecurityDescriptorFromTarHeader(hdr)
	if err != nil {
		return nil, err
	var sd []byte
	var err error
	// Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
	// by this library will have raw binary for the security descriptor.
	if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
		sd, err = winio.SddlToSecurityDescriptor(sddl)
		if err != nil {
			return nil, err
		}
	}
	if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
		sd, err = base64.StdEncoding.DecodeString(sdraw)
		if err != nil {
			return nil, err
		}
	}
	if len(sd) != 0 {
		bhdr := winio.BackupHeader{
@@ -440,12 +388,25 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
			return nil, err
		}
	}

	eadata, err := ExtendedAttributesFromTarHeader(hdr)
	if err != nil {
		return nil, err
	var eas []winio.ExtendedAttribute
	for k, v := range hdr.PAXRecords {
		if !strings.HasPrefix(k, hdrEaPrefix) {
			continue
		}
		data, err := base64.StdEncoding.DecodeString(v)
		if err != nil {
			return nil, err
		}
		eas = append(eas, winio.ExtendedAttribute{
			Name:  k[len(hdrEaPrefix):],
			Value: data,
		})
	}
	if len(eadata) != 0 {
	if len(eas) != 0 {
		eadata, err := winio.EncodeExtendedAttributes(eas)
		if err != nil {
			return nil, err
		}
		bhdr := winio.BackupHeader{
			Id:   winio.BackupEaData,
			Size: int64(len(eadata)),
@@ -459,9 +420,13 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
			return nil, err
		}
	}

	if hdr.Typeflag == tar.TypeSymlink {
		reparse := EncodeReparsePointFromTarHeader(hdr)
		_, isMountPoint := hdr.PAXRecords[hdrMountPoint]
		rp := winio.ReparsePoint{
			Target:       filepath.FromSlash(hdr.Linkname),
			IsMountPoint: isMountPoint,
		}
		reparse := winio.EncodeReparsePoint(&rp)
		bhdr := winio.BackupHeader{
			Id:   winio.BackupReparseData,
			Size: int64(len(reparse)),
@@ -474,9 +439,7 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
		if err != nil {
			return nil, err
		}

	}

	if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
		bhdr := winio.BackupHeader{
			Id: winio.BackupData,
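Note on the backuptar hunks above: both sides of the diff decode binary Windows metadata out of PAX records, which are string key/value pairs, so binary payloads travel base64-encoded. The following is a self-contained sketch of just that decoding step, not the vendored helpers; the record key used here is hypothetical.

package main

import (
	"archive/tar"
	"encoding/base64"
	"fmt"
)

func main() {
	// PAX records only hold strings, so binary metadata is stored base64-encoded.
	hdr := &tar.Header{
		PAXRecords: map[string]string{
			"MSWINDOWS.example": base64.StdEncoding.EncodeToString([]byte{0x01, 0x02}),
		},
	}

	// Reading it back reverses the encoding before the bytes are used.
	raw, err := base64.StdEncoding.DecodeString(hdr.PAXRecords["MSWINDOWS.example"])
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded %d bytes of binary metadata\n", len(raw))
}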
6
vendor/github.com/Microsoft/go-winio/file.go
generated
vendored
@@ -1,4 +1,3 @@
//go:build windows
// +build windows

package winio
@@ -144,11 +143,6 @@ func (f *win32File) Close() error {
	return nil
}

// IsClosed checks if the file has been closed
func (f *win32File) IsClosed() bool {
	return f.closing.isSet()
}

// prepareIo prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
func (f *win32File) prepareIo() (*ioOperation, error) {
17
vendor/github.com/Microsoft/go-winio/hvsock.go
generated
vendored
@@ -1,4 +1,3 @@
//go:build windows
// +build windows

package winio
@@ -253,23 +252,15 @@ func (conn *HvsockConn) Close() error {
	return conn.sock.Close()
}

func (conn *HvsockConn) IsClosed() bool {
	return conn.sock.IsClosed()
}

func (conn *HvsockConn) shutdown(how int) error {
	if conn.IsClosed() {
		return ErrFileClosed
	}

	err := syscall.Shutdown(conn.sock.handle, how)
	err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD)
	if err != nil {
		return os.NewSyscallError("shutdown", err)
	}
	return nil
}

// CloseRead shuts down the read end of the socket, preventing future read operations.
// CloseRead shuts down the read end of the socket.
func (conn *HvsockConn) CloseRead() error {
	err := conn.shutdown(syscall.SHUT_RD)
	if err != nil {
@@ -278,8 +269,8 @@ func (conn *HvsockConn) CloseRead() error {
	return nil
}

// CloseWrite shuts down the write end of the socket, preventing future write operations and
// notifying the other endpoint that no more data will be written.
// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
// no more data will be written.
func (conn *HvsockConn) CloseWrite() error {
	err := conn.shutdown(syscall.SHUT_WR)
	if err != nil {
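Note on the hvsock hunk above: one side of the diff hardcodes `syscall.SHUT_RD` inside `shutdown`, so `CloseWrite` would actually shut down the read side; the other threads the `how` parameter through to `syscall.Shutdown`. A minimal standalone sketch of that bug class follows, independent of the vendored code and with entirely hypothetical names.

package main

import "fmt"

const (
	shutRead  = 0
	shutWrite = 1
)

// buggyShutdown ignores its parameter, like a helper that always passes
// the same constant: every wrapper built on it behaves identically.
func buggyShutdown(how int) string { return fmt.Sprintf("shutdown(%d)", shutRead) }

// fixedShutdown threads the parameter through to the underlying call.
func fixedShutdown(how int) string { return fmt.Sprintf("shutdown(%d)", how) }

func main() {
	fmt.Println("CloseWrite via buggy helper:", buggyShutdown(shutWrite)) // shuts down reads instead
	fmt.Println("CloseWrite via fixed helper:", fixedShutdown(shutWrite))
}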
9
vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
generated
vendored
@@ -14,6 +14,8 @@ import (
	"encoding/binary"
	"fmt"
	"strconv"

	"golang.org/x/sys/windows"
)

// Variant specifies which GUID variant (or "type") of the GUID. It determines
@@ -39,6 +41,13 @@ type Version uint8
var _ = (encoding.TextMarshaler)(GUID{})
var _ = (encoding.TextUnmarshaler)(&GUID{})

// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type so that stringification and
// marshaling can be supported. The representation matches that used by native
// Windows code.
type GUID windows.GUID

// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
func NewV4() (GUID, error) {
	var b [16]byte
15
vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
generated
vendored
@@ -1,15 +0,0 @@
// +build !windows

package guid

// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type as that is only available to builds
// targeted at `windows`. The representation matches that used by native Windows
// code.
type GUID struct {
	Data1 uint32
	Data2 uint16
	Data3 uint16
	Data4 [8]byte
}
10
vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
generated
vendored
@@ -1,10 +0,0 @@
package guid

import "golang.org/x/sys/windows"

// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type so that stringification and
// marshaling can be supported. The representation matches that used by native
// Windows code.
type GUID windows.GUID
15
vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go
generated
vendored
@@ -3,10 +3,11 @@
package security

import (
	"fmt"
	"os"
	"syscall"
	"unsafe"

	"github.com/pkg/errors"
)

type (
@@ -71,7 +72,7 @@ func GrantVmGroupAccess(name string) error {
	// Stat (to determine if `name` is a directory).
	s, err := os.Stat(name)
	if err != nil {
		return fmt.Errorf("%s os.Stat %s: %w", gvmga, name, err)
		return errors.Wrapf(err, "%s os.Stat %s", gvmga, name)
	}

	// Get a handle to the file/directory. Must defer Close on success.
@@ -87,7 +88,7 @@ func GrantVmGroupAccess(name string) error {
	sd := uintptr(0)
	origDACL := uintptr(0)
	if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil {
		return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err)
		return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name)
	}
	defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd)))

@@ -101,7 +102,7 @@ func GrantVmGroupAccess(name string) error {

	// And finally use SetSecurityInfo to apply the updated DACL.
	if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil {
		return fmt.Errorf("%s SetSecurityInfo %s: %w", gvmga, name, err)
		return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name)
	}

	return nil
@@ -119,7 +120,7 @@ func createFile(name string, isDir bool) (syscall.Handle, error) {
	}
	fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0)
	if err != nil {
		return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err)
		return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name)
	}
	return fd, nil
}
@@ -130,7 +131,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp
	// Generate pointers to the SIDs based on the string SIDs
	sid, err := syscall.StringToSid(sidVmGroup)
	if err != nil {
		return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err)
		return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup)
	}

	inheritance := inheritModeNoInheritance
@@ -153,7 +154,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp

	modifiedDACL := uintptr(0)
	if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil {
		return 0, fmt.Errorf("%s SetEntriesInAcl %s: %w", gvmga, name, err)
		return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name)
	}

	return modifiedDACL, nil
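Note on the hunks above: the diff swaps between stdlib `fmt.Errorf("...: %w", err)` wrapping and `errors.Wrapf` from github.com/pkg/errors. Both keep the cause inspectable; the stdlib sketch below shows what the `%w` form preserves, using only the standard library so it runs anywhere.

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Stat("/does/not/exist")

	// Wrapping with %w keeps the original error in the chain.
	wrapped := fmt.Errorf("GrantVmGroupAccess os.Stat: %w", err)

	// The cause is still reachable through errors.Is / errors.As.
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true
}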
59
vendor/github.com/Microsoft/go-winio/vhd/vhd.go
generated
vendored
@@ -1,4 +1,3 @@
//go:build windows
// +build windows

package vhd
@@ -8,13 +7,14 @@ import (
	"syscall"

	"github.com/Microsoft/go-winio/pkg/guid"
	"github.com/pkg/errors"
	"golang.org/x/sys/windows"
)

//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go

//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk
//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk
//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk
//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) = virtdisk.AttachVirtualDisk
//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) = virtdisk.DetachVirtualDisk
//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) = virtdisk.GetVirtualDiskPhysicalPath
@@ -62,27 +62,13 @@ type OpenVirtualDiskParameters struct {
	Version2 OpenVersion2
}

// The higher level `OpenVersion2` struct uses bools to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However,
// the internal windows structure uses `BOOLS` aka int32s for these types. `openVersion2` is used for translating
// `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods.
type openVersion2 struct {
	getInfoOnly    int32
	readOnly       int32
	resiliencyGUID guid.GUID
}

type openVirtualDiskParameters struct {
	version  uint32
	version2 openVersion2
}

type AttachVersion2 struct {
	RestrictedOffset uint64
	RestrictedLength uint64
}

type AttachVirtualDiskParameters struct {
	Version  uint32
	Version  uint32 // Must always be set to 2
	Version2 AttachVersion2
}

@@ -160,13 +146,16 @@ func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
		return err
	}

	return syscall.CloseHandle(handle)
	if err := syscall.CloseHandle(handle); err != nil {
		return err
	}
	return nil
}

// DetachVirtualDisk detaches a virtual hard disk by handle.
func DetachVirtualDisk(handle syscall.Handle) (err error) {
	if err := detachVirtualDisk(handle, 0, 0); err != nil {
		return fmt.Errorf("failed to detach virtual disk: %w", err)
		return errors.Wrap(err, "failed to detach virtual disk")
	}
	return nil
}
@@ -196,7 +185,7 @@ func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtua
		parameters,
		nil,
	); err != nil {
		return fmt.Errorf("failed to attach virtual disk: %w", err)
		return errors.Wrap(err, "failed to attach virtual disk")
	}
	return nil
}
@@ -220,7 +209,7 @@ func AttachVhd(path string) (err error) {
		AttachVirtualDiskFlagNone,
		&params,
	); err != nil {
		return fmt.Errorf("failed to attach virtual disk: %w", err)
		return errors.Wrap(err, "failed to attach virtual disk")
	}
	return nil
}
@@ -245,35 +234,19 @@ func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask Virtual
	var (
		handle      syscall.Handle
		defaultType VirtualStorageType
		getInfoOnly int32
		readOnly    int32
	)
	if parameters.Version != 2 {
		return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version)
	}
	if parameters.Version2.GetInfoOnly {
		getInfoOnly = 1
	}
	if parameters.Version2.ReadOnly {
		readOnly = 1
	}
	params := &openVirtualDiskParameters{
		version: parameters.Version,
		version2: openVersion2{
			getInfoOnly,
			readOnly,
			parameters.Version2.ResiliencyGUID,
		},
	}
	if err := openVirtualDisk(
		&defaultType,
		vhdPath,
		uint32(virtualDiskAccessMask),
		uint32(openVirtualDiskFlags),
		params,
		parameters,
		&handle,
	); err != nil {
		return 0, fmt.Errorf("failed to open virtual disk: %w", err)
		return 0, errors.Wrap(err, "failed to open virtual disk")
	}
	return handle, nil
}
@@ -299,7 +272,7 @@ func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask,
		nil,
		&handle,
	); err != nil {
		return handle, fmt.Errorf("failed to create virtual disk: %w", err)
		return handle, errors.Wrap(err, "failed to create virtual disk")
	}
	return handle, nil
}
@@ -317,7 +290,7 @@ func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) {
		&diskPathSizeInBytes,
		&diskPhysicalPathBuf[0],
	); err != nil {
		return "", fmt.Errorf("failed to get disk physical path: %w", err)
		return "", errors.Wrap(err, "failed to get disk physical path")
	}
	return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil
}
@@ -341,10 +314,10 @@ func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error
		createParams,
	)
	if err != nil {
		return fmt.Errorf("failed to create differencing vhd: %w", err)
		return fmt.Errorf("failed to create differencing vhd: %s", err)
	}
	if err := syscall.CloseHandle(vhdHandle); err != nil {
		return fmt.Errorf("failed to close differencing vhd handle: %w", err)
		return fmt.Errorf("failed to close differencing vhd handle: %s", err)
	}
	return nil
}
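Note on the vhd.go hunks above: the removed `openVersion2`/`openVirtualDiskParameters` types exist purely to translate idiomatic Go bools into the int32 `BOOL` fields the Win32 ABI expects. Below is a minimal, platform-independent sketch of that translation pattern; all names are hypothetical and it is not the vendored code.

package main

import "fmt"

// options is a caller-facing struct with idiomatic Go bools.
type options struct {
	ReadOnly    bool
	GetInfoOnly bool
}

// win32Options mirrors an ABI-facing struct whose BOOL fields are 32-bit ints.
type win32Options struct {
	readOnly    int32
	getInfoOnly int32
}

// toWin32 widens each bool to the BOOL-sized field before the struct would be
// handed to a syscall.
func toWin32(o options) win32Options {
	var w win32Options
	if o.ReadOnly {
		w.readOnly = 1
	}
	if o.GetInfoOnly {
		w.getInfoOnly = 1
	}
	return w
}

func main() {
	fmt.Printf("%+v\n", toWin32(options{ReadOnly: true}))
}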
4
vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go
generated
vendored
@@ -88,7 +88,7 @@ func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint
	return
}

func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
	var _p0 *uint16
	_p0, win32err = syscall.UTF16PtrFromString(path)
	if win32err != nil {
@@ -97,7 +97,7 @@ func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtua
	return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle)
}

func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
	r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
	if r0 != 0 {
		win32err = syscall.Errno(r0)
188
vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
generated
vendored
@@ -4,22 +4,17 @@ import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/Microsoft/hcsshim/internal/cow"
	"github.com/Microsoft/hcsshim/internal/hcs/schema1"
	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
	"github.com/Microsoft/hcsshim/internal/jobobject"
	"github.com/Microsoft/hcsshim/internal/log"
	"github.com/Microsoft/hcsshim/internal/logfields"
	"github.com/Microsoft/hcsshim/internal/oc"
	"github.com/Microsoft/hcsshim/internal/timeout"
	"github.com/Microsoft/hcsshim/internal/vmcompute"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

@@ -33,8 +28,7 @@ type System struct {
	waitBlock chan struct{}
	waitError error
	exitError error
	os, typ, owner string
	startTime      time.Time
	os, typ string
}

func newSystem(id string) *System {
@@ -44,11 +38,6 @@ func newSystem(id string) *System {
	}
}

// Implementation detail for silo naming, this should NOT be relied upon very heavily.
func siloNameFmt(containerID string) string {
	return fmt.Sprintf(`\Container_%s`, containerID)
}

// CreateComputeSystem creates a new compute system with the given configuration but does not start it.
func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) {
	operation := "hcs::CreateComputeSystem"
@@ -138,7 +127,6 @@ func (computeSystem *System) getCachedProperties(ctx context.Context) error {
	}
	computeSystem.typ = strings.ToLower(props.SystemType)
	computeSystem.os = strings.ToLower(props.RuntimeOSType)
	computeSystem.owner = strings.ToLower(props.Owner)
	if computeSystem.os == "" && computeSystem.typ == "container" {
		// Pre-RS5 HCS did not return the OS, but it only supported containers
		// that ran Windows.
@@ -207,7 +195,7 @@ func (computeSystem *System) Start(ctx context.Context) (err error) {
	if err != nil {
		return makeSystemError(computeSystem, operation, err, events)
	}
	computeSystem.startTime = time.Now()

	return nil
}

@@ -336,115 +324,11 @@ func (computeSystem *System) Properties(ctx context.Context, types ...schema1.Pr
	return properties, nil
}

// queryInProc handles querying for container properties without reaching out to HCS. `props`
// will be updated to contain any data returned from the queries present in `types`. If any properties
// failed to be queried they will be tallied up and returned in as the first return value. Failures on
// query are NOT considered errors; the only failure case for this method is if the containers job object
// cannot be opened.
func (computeSystem *System) queryInProc(ctx context.Context, props *hcsschema.Properties, types []hcsschema.PropertyType) ([]hcsschema.PropertyType, error) {
	// In the future we can make use of some new functionality in the HCS that allows you
	// to pass a job object for HCS to use for the container. Currently, the only way we'll
	// be able to open the job/silo is if we're running as SYSTEM.
	jobOptions := &jobobject.Options{
		UseNTVariant: true,
		Name:         siloNameFmt(computeSystem.id),
	}
	job, err := jobobject.Open(ctx, jobOptions)
	if err != nil {
		return nil, err
	}
	defer job.Close()
// PropertiesV2 returns the requested container properties targeting a V2 schema container.
func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) {
	computeSystem.handleLock.RLock()
	defer computeSystem.handleLock.RUnlock()

	var fallbackQueryTypes []hcsschema.PropertyType
	for _, propType := range types {
		switch propType {
		case hcsschema.PTStatistics:
			// Handle a bad caller asking for the same type twice. No use in re-querying if this is
			// filled in already.
			if props.Statistics == nil {
				props.Statistics, err = computeSystem.statisticsInProc(job)
				if err != nil {
					log.G(ctx).WithError(err).Warn("failed to get statistics in-proc")

					fallbackQueryTypes = append(fallbackQueryTypes, propType)
				}
			}
		default:
			fallbackQueryTypes = append(fallbackQueryTypes, propType)
		}
	}

	return fallbackQueryTypes, nil
}

// statisticsInProc emulates what HCS does to grab statistics for a given container with a small
// change to make grabbing the private working set total much more efficient.
func (computeSystem *System) statisticsInProc(job *jobobject.JobObject) (*hcsschema.Statistics, error) {
	// Start timestamp for these stats before we grab them to match HCS
	timestamp := time.Now()

	memInfo, err := job.QueryMemoryStats()
	if err != nil {
		return nil, err
	}

	processorInfo, err := job.QueryProcessorStats()
	if err != nil {
		return nil, err
	}

	storageInfo, err := job.QueryStorageStats()
	if err != nil {
		return nil, err
	}

	// This calculates the private working set more efficiently than HCS does. HCS calls NtQuerySystemInformation
	// with the class SystemProcessInformation which returns an array containing system information for *every*
	// process running on the machine. They then grab the pids that are running in the container and filter down
	// the entries in the array to only what's running in that silo and start tallying up the total. This doesn't
	// work well as performance should get worse if more processess are running on the machine in general and not
	// just in the container. All of the additional information besides the WorkingSetPrivateSize field is ignored
	// as well which isn't great and is wasted work to fetch.
	//
	// HCS only let's you grab statistics in an all or nothing fashion, so we can't just grab the private
	// working set ourselves and ask for everything else seperately. The optimization we can make here is
	// to open the silo ourselves and do the same queries for the rest of the info, as well as calculating
	// the private working set in a more efficient manner by:
	//
	// 1. Find the pids running in the silo
	// 2. Get a process handle for every process (only need PROCESS_QUERY_LIMITED_INFORMATION access)
	// 3. Call NtQueryInformationProcess on each process with the class ProcessVmCounters
	// 4. Tally up the total using the field PrivateWorkingSetSize in VM_COUNTERS_EX2.
	privateWorkingSet, err := job.QueryPrivateWorkingSet()
	if err != nil {
		return nil, err
	}

	return &hcsschema.Statistics{
		Timestamp:          timestamp,
		ContainerStartTime: computeSystem.startTime,
		Uptime100ns:        uint64(time.Since(computeSystem.startTime).Nanoseconds()) / 100,
		Memory: &hcsschema.MemoryStats{
			MemoryUsageCommitBytes:            memInfo.JobMemory,
			MemoryUsageCommitPeakBytes:        memInfo.PeakJobMemoryUsed,
			MemoryUsagePrivateWorkingSetBytes: privateWorkingSet,
		},
		Processor: &hcsschema.ProcessorStats{
			RuntimeKernel100ns: uint64(processorInfo.TotalKernelTime),
			RuntimeUser100ns:   uint64(processorInfo.TotalUserTime),
			TotalRuntime100ns:  uint64(processorInfo.TotalKernelTime + processorInfo.TotalUserTime),
		},
		Storage: &hcsschema.StorageStats{
			ReadCountNormalized:  uint64(storageInfo.ReadStats.IoCount),
			ReadSizeBytes:        storageInfo.ReadStats.TotalSize,
			WriteCountNormalized: uint64(storageInfo.WriteStats.IoCount),
			WriteSizeBytes:       storageInfo.WriteStats.TotalSize,
		},
	}, nil
}

// hcsPropertiesV2Query is a helper to make a HcsGetComputeSystemProperties call using the V2 schema property types.
func (computeSystem *System) hcsPropertiesV2Query(ctx context.Context, types []hcsschema.PropertyType) (*hcsschema.Properties, error) {
	operation := "hcs::System::PropertiesV2"

	queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types})
@@ -461,66 +345,12 @@ func (computeSystem *System) hcsPropertiesV2Query(ctx context.Context, types []h
	if propertiesJSON == "" {
		return nil, ErrUnexpectedValue
	}
	props := &hcsschema.Properties{}
	if err := json.Unmarshal([]byte(propertiesJSON), props); err != nil {
	properties := &hcsschema.Properties{}
	if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil {
		return nil, makeSystemError(computeSystem, operation, err, nil)
	}

	return props, nil
}

// PropertiesV2 returns the requested compute systems properties targeting a V2 schema compute system.
func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (_ *hcsschema.Properties, err error) {
	computeSystem.handleLock.RLock()
	defer computeSystem.handleLock.RUnlock()

	// Let HCS tally up the total for VM based queries instead of querying ourselves.
	if computeSystem.typ != "container" {
		return computeSystem.hcsPropertiesV2Query(ctx, types)
	}

	// Define a starter Properties struct with the default fields returned from every
	// query. Owner is only returned from Statistics but it's harmless to include.
	properties := &hcsschema.Properties{
		Id:            computeSystem.id,
		SystemType:    computeSystem.typ,
		RuntimeOsType: computeSystem.os,
		Owner:         computeSystem.owner,
	}

	logEntry := log.G(ctx)
	// First lets try and query ourselves without reaching to HCS. If any of the queries fail
	// we'll take note and fallback to querying HCS for any of the failed types.
	fallbackTypes, err := computeSystem.queryInProc(ctx, properties, types)
	if err == nil && len(fallbackTypes) == 0 {
		return properties, nil
	} else if err != nil {
		logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err))
		fallbackTypes = types
	}

	logEntry.WithFields(logrus.Fields{
		logfields.ContainerID: computeSystem.id,
		"propertyTypes":       fallbackTypes,
	}).Info("falling back to HCS for property type queries")

	hcsProperties, err := computeSystem.hcsPropertiesV2Query(ctx, fallbackTypes)
	if err != nil {
		return nil, err
	}

	// Now add in anything that we might have successfully queried in process.
	if properties.Statistics != nil {
		hcsProperties.Statistics = properties.Statistics
		hcsProperties.Owner = properties.Owner
	}

	// For future support for querying processlist in-proc as well.
	if properties.ProcessList != nil {
		hcsProperties.ProcessList = properties.ProcessList
	}

	return hcsProperties, nil
	return properties, nil
}

// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5.
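Note on the system.go hunks above: one side of the diff has `PropertiesV2` try a cheap in-proc query first (via the container's job object), collect the property types it could not answer, and send only that remainder to the expensive HCS call before merging results. The following is a standalone sketch of that fallback-and-merge pattern, with entirely hypothetical names, not the vendored code.

package main

import "fmt"

// queryLocal answers what it can cheaply and reports what it missed.
func queryLocal(keys []string) (found map[string]string, missed []string) {
	found = map[string]string{}
	for _, k := range keys {
		if k == "statistics" { // pretend only statistics can be answered in-proc
			found[k] = "local value"
		} else {
			missed = append(missed, k)
		}
	}
	return found, missed
}

// queryRemote stands in for the expensive remote call, invoked only for
// the keys the local path could not satisfy.
func queryRemote(keys []string) map[string]string {
	out := map[string]string{}
	for _, k := range keys {
		out[k] = "remote value"
	}
	return out
}

func main() {
	found, missed := queryLocal([]string{"statistics", "processlist"})
	for k, v := range queryRemote(missed) {
		found[k] = v // merge remote answers into the locally filled result
	}
	fmt.Println(found)
}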
9
vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go
generated
vendored
@@ -21,11 +21,10 @@ const (
)

type NatPolicy struct {
	Type                 PolicyType `json:"Type"`
	Protocol             string     `json:",omitempty"`
	InternalPort         uint16     `json:",omitempty"`
	ExternalPort         uint16     `json:",omitempty"`
	ExternalPortReserved bool       `json:",omitempty"`
	Type         PolicyType `json:"Type"`
	Protocol     string     `json:",omitempty"`
	InternalPort uint16     `json:",omitempty"`
	ExternalPort uint16     `json:",omitempty"`
}

type QosPolicy struct {
111
vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go
generated
vendored
@@ -1,111 +0,0 @@
package jobobject

import (
	"context"
	"fmt"
	"sync"
	"unsafe"

	"github.com/Microsoft/hcsshim/internal/log"
	"github.com/Microsoft/hcsshim/internal/queue"
	"github.com/Microsoft/hcsshim/internal/winapi"
	"github.com/sirupsen/logrus"
	"golang.org/x/sys/windows"
)

var (
	ioInitOnce sync.Once
	initIOErr  error
	// Global iocp handle that will be re-used for every job object
	ioCompletionPort windows.Handle
	// Mapping of job handle to queue to place notifications in.
	jobMap sync.Map
)

// MsgAllProcessesExited is a type representing a message that every process in a job has exited.
type MsgAllProcessesExited struct{}

// MsgUnimplemented represents a message that we are aware of, but that isn't implemented currently.
// This should not be treated as an error.
type MsgUnimplemented struct{}

// pollIOCP polls the io completion port forever.
func pollIOCP(ctx context.Context, iocpHandle windows.Handle) {
	var (
		overlapped uintptr
		code       uint32
		key        uintptr
	)

	for {
		err := windows.GetQueuedCompletionStatus(iocpHandle, &code, &key, (**windows.Overlapped)(unsafe.Pointer(&overlapped)), windows.INFINITE)
		if err != nil {
			log.G(ctx).WithError(err).Error("failed to poll for job object message")
			continue
		}
		if val, ok := jobMap.Load(key); ok {
			msq, ok := val.(*queue.MessageQueue)
			if !ok {
				log.G(ctx).WithField("value", msq).Warn("encountered non queue type in job map")
				continue
			}
			notification, err := parseMessage(code, overlapped)
			if err != nil {
				log.G(ctx).WithFields(logrus.Fields{
					"code":       code,
					"overlapped": overlapped,
				}).Warn("failed to parse job object message")
				continue
			}
			if err := msq.Enqueue(notification); err == queue.ErrQueueClosed {
				// Write will only return an error when the queue is closed.
				// The only time a queue would ever be closed is when we call `Close` on
				// the job it belongs to which also removes it from the jobMap, so something
				// went wrong here. We can't return as this is reading messages for all jobs
				// so just log it and move on.
				log.G(ctx).WithFields(logrus.Fields{
					"code":       code,
					"overlapped": overlapped,
				}).Warn("tried to write to a closed queue")
				continue
			}
		} else {
			log.G(ctx).Warn("received a message for a job not present in the mapping")
		}
	}
}

func parseMessage(code uint32, overlapped uintptr) (interface{}, error) {
	// Check code and parse out relevant information related to that notification
	// that we care about. For now all we handle is the message that all processes
	// in the job have exited.
	switch code {
	case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO:
		return MsgAllProcessesExited{}, nil
	// Other messages for completeness and a check to make sure that if we fall
	// into the default case that this is a code we don't know how to handle.
	case winapi.JOB_OBJECT_MSG_END_OF_JOB_TIME:
	case winapi.JOB_OBJECT_MSG_END_OF_PROCESS_TIME:
	case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT:
	case winapi.JOB_OBJECT_MSG_NEW_PROCESS:
	case winapi.JOB_OBJECT_MSG_EXIT_PROCESS:
	case winapi.JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS:
	case winapi.JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT:
	case winapi.JOB_OBJECT_MSG_JOB_MEMORY_LIMIT:
	case winapi.JOB_OBJECT_MSG_NOTIFICATION_LIMIT:
	default:
		return nil, fmt.Errorf("unknown job notification type: %d", code)
	}
	return MsgUnimplemented{}, nil
}

// Assigns an IO completion port to get notified of events for the registered job
// object.
func attachIOCP(job windows.Handle, iocp windows.Handle) error {
	info := winapi.JOBOBJECT_ASSOCIATE_COMPLETION_PORT{
		CompletionKey:  job,
		CompletionPort: iocp,
	}
	_, err := windows.SetInformationJobObject(job, windows.JobObjectAssociateCompletionPortInformation, uintptr(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info)))
	return err
}
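Note on the removed iocp.go above (and the setupNotifications code in the next file): the package shares one IO completion port across all job objects, created lazily behind a sync.Once with the creation error captured for later callers. A standalone sketch of that initialization idiom follows; it is not the vendored code and the resource here is a stand-in.

package main

import (
	"fmt"
	"sync"
)

var (
	initOnce sync.Once
	initErr  error
	resource string
)

// getResource creates the process-wide resource at most once and reports
// the original creation error to every subsequent caller.
func getResource() (string, error) {
	initOnce.Do(func() {
		// Imagine an expensive, fallible setup such as creating an IO
		// completion port; here it simply succeeds.
		resource, initErr = "iocp-handle", nil
	})
	if initErr != nil {
		return "", fmt.Errorf("shared resource init failed: %w", initErr)
	}
	return resource, nil
}

func main() {
	// Both calls observe the same single initialization.
	for i := 0; i < 2; i++ {
		r, err := getResource()
		fmt.Println(r, err)
	}
}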
538
vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go
generated
vendored
@@ -1,538 +0,0 @@
|
||||
package jobobject
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/queue"
|
||||
"github.com/Microsoft/hcsshim/internal/winapi"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// This file provides higher level constructs for the win32 job object API.
|
||||
// Most of the core creation and management functions are already present in "golang.org/x/sys/windows"
|
||||
// (CreateJobObject, AssignProcessToJobObject, etc.) as well as most of the limit information
|
||||
// structs and associated limit flags. Whatever is not present from the job object API
|
||||
// in golang.org/x/sys/windows is located in /internal/winapi.
|
||||
//
|
||||
// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects
|
||||
|
||||
// JobObject is a high level wrapper around a Windows job object. Holds a handle to
|
||||
// the job, a queue to receive iocp notifications about the lifecycle
|
||||
// of the job and a mutex for synchronized handle access.
|
||||
type JobObject struct {
|
||||
handle windows.Handle
|
||||
mq *queue.MessageQueue
|
||||
handleLock sync.RWMutex
|
||||
}
|
||||
|
||||
// JobLimits represents the resource constraints that can be applied to a job object.
|
||||
type JobLimits struct {
|
||||
CPULimit uint32
|
||||
CPUWeight uint32
|
||||
MemoryLimitInBytes uint64
|
||||
MaxIOPS int64
|
||||
MaxBandwidth int64
|
||||
}
|
||||
|
||||
type CPURateControlType uint32
|
||||
|
||||
const (
|
||||
WeightBased CPURateControlType = iota
|
||||
RateBased
|
||||
)
|
||||
|
||||
// Processor resource controls
|
||||
const (
|
||||
cpuLimitMin = 1
|
||||
cpuLimitMax = 10000
|
||||
cpuWeightMin = 1
|
||||
cpuWeightMax = 9
|
||||
)
|
||||
|
||||
var (
|
||||
ErrAlreadyClosed = errors.New("the handle has already been closed")
|
||||
ErrNotRegistered = errors.New("job is not registered to receive notifications")
|
||||
)
|
||||
|
||||
// Options represents the set of configurable options when making or opening a job object.
|
||||
type Options struct {
|
||||
// `Name` specifies the name of the job object if a named job object is desired.
|
||||
Name string
|
||||
// `Notifications` specifies if the job will be registered to receive notifications.
|
||||
// Defaults to false.
|
||||
Notifications bool
|
||||
// `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject.
|
||||
// Defaults to false.
|
||||
UseNTVariant bool
|
||||
// `IOTracking` enables tracking I/O statistics on the job object. More specifically this
|
||||
// calls SetInformationJobObject with the JobObjectIoAttribution class.
|
||||
EnableIOTracking bool
|
||||
}
|
||||
|
||||
// Create creates a job object.
|
||||
//
|
||||
// If options.Name is an empty string, the job will not be assigned a name.
|
||||
//
|
||||
// If options.Notifications are not enabled `PollNotifications` will return immediately with error `errNotRegistered`.
|
||||
//
|
||||
// If `options` is nil, use default option values.
|
||||
//
|
||||
// Returns a JobObject structure and an error if there is one.
|
||||
func Create(ctx context.Context, options *Options) (_ *JobObject, err error) {
|
||||
if options == nil {
|
||||
options = &Options{}
|
||||
}
|
||||
|
||||
var jobName *winapi.UnicodeString
|
||||
if options.Name != "" {
|
||||
jobName, err = winapi.NewUnicodeString(options.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var jobHandle windows.Handle
|
||||
if options.UseNTVariant {
|
||||
oa := winapi.ObjectAttributes{
|
||||
Length: unsafe.Sizeof(winapi.ObjectAttributes{}),
|
||||
ObjectName: jobName,
|
||||
Attributes: 0,
|
||||
}
|
||||
status := winapi.NtCreateJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa)
|
||||
if status != 0 {
|
||||
return nil, winapi.RtlNtStatusToDosError(status)
|
||||
}
|
||||
} else {
|
||||
var jobNameBuf *uint16
|
||||
if jobName != nil && jobName.Buffer != nil {
|
||||
jobNameBuf = jobName.Buffer
|
||||
}
|
||||
jobHandle, err = windows.CreateJobObject(nil, jobNameBuf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
windows.Close(jobHandle)
|
||||
}
|
||||
}()
|
||||
|
||||
job := &JobObject{
|
||||
handle: jobHandle,
|
||||
}
|
||||
|
||||
// If the IOCP we'll be using to receive messages for all jobs hasn't been
|
||||
// created, create it and start polling.
|
||||
if options.Notifications {
|
||||
mq, err := setupNotifications(ctx, job)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
job.mq = mq
|
||||
}
|
||||
|
||||
if options.EnableIOTracking {
|
||||
if err := enableIOTracking(jobHandle); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return job, nil
|
||||
}
|
||||
|
||||
// Open opens an existing job object with name provided in `options`. If no name is provided
|
||||
// return an error since we need to know what job object to open.
|
||||
//
|
||||
// If options.Notifications is false `PollNotifications` will return immediately with error `errNotRegistered`.
|
||||
//
|
||||
// Returns a JobObject structure and an error if there is one.
|
||||
func Open(ctx context.Context, options *Options) (_ *JobObject, err error) {
|
||||
if options == nil || (options != nil && options.Name == "") {
|
||||
return nil, errors.New("no job object name specified to open")
|
||||
}
|
||||
|
||||
unicodeJobName, err := winapi.NewUnicodeString(options.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var jobHandle windows.Handle
|
||||
if options != nil && options.UseNTVariant {
|
||||
oa := winapi.ObjectAttributes{
|
||||
Length: unsafe.Sizeof(winapi.ObjectAttributes{}),
|
||||
ObjectName: unicodeJobName,
|
||||
Attributes: 0,
|
||||
}
|
||||
status := winapi.NtOpenJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa)
|
||||
if status != 0 {
|
||||
return nil, winapi.RtlNtStatusToDosError(status)
|
||||
}
|
||||
} else {
|
||||
jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, false, unicodeJobName.Buffer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
windows.Close(jobHandle)
|
||||
}
|
||||
}()
|
||||
|
||||
job := &JobObject{
|
||||
handle: jobHandle,
|
||||
}
|
||||
|
||||
// If the IOCP we'll be using to receive messages for all jobs hasn't been
|
||||
// created, create it and start polling.
|
||||
if options != nil && options.Notifications {
|
||||
mq, err := setupNotifications(ctx, job)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
job.mq = mq
|
||||
}
|
||||
|
||||
return job, nil
|
||||
}
|
||||
|
||||
// helper function to setup notifications for creating/opening a job object
|
||||
func setupNotifications(ctx context.Context, job *JobObject) (*queue.MessageQueue, error) {
|
||||
job.handleLock.RLock()
|
||||
defer job.handleLock.RUnlock()
|
||||
|
||||
if job.handle == 0 {
|
||||
return nil, ErrAlreadyClosed
|
||||
}
|
||||
|
||||
ioInitOnce.Do(func() {
|
||||
h, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff)
|
||||
if err != nil {
|
||||
initIOErr = err
|
||||
return
|
||||
}
|
||||
ioCompletionPort = h
|
||||
go pollIOCP(ctx, h)
|
||||
})
|
||||
|
||||
if initIOErr != nil {
|
||||
return nil, initIOErr
|
||||
}
|
||||
|
||||
mq := queue.NewMessageQueue()
|
||||
jobMap.Store(uintptr(job.handle), mq)
|
||||
if err := attachIOCP(job.handle, ioCompletionPort); err != nil {
|
||||
jobMap.Delete(uintptr(job.handle))
|
||||
return nil, fmt.Errorf("failed to attach job to IO completion port: %w", err)
|
||||
}
|
||||
return mq, nil
|
||||
}
|
||||
|
||||
// PollNotification will poll for a job object notification. This call should only be called once
|
||||
// per job (ideally in a goroutine loop) and will block if there is not a notification ready.
|
||||
// This call will return immediately with error `ErrNotRegistered` if the job was not registered
|
||||
// to receive notifications during `Create`. Internally, messages will be queued and there
|
||||
// is no worry of messages being dropped.
|
||||
func (job *JobObject) PollNotification() (interface{}, error) {
|
||||
if job.mq == nil {
|
||||
return nil, ErrNotRegistered
|
||||
}
|
||||
return job.mq.Dequeue()
|
||||
}
|
||||
|
||||
// UpdateProcThreadAttribute updates the passed in ProcThreadAttributeList to contain what is necessary to
|
||||
// launch a process in a job at creation time. This can be used to avoid having to call Assign() after a process
|
||||
// has already started running.
|
||||
func (job *JobObject) UpdateProcThreadAttribute(attrList *windows.ProcThreadAttributeListContainer) error {
|
||||
job.handleLock.RLock()
|
||||
defer job.handleLock.RUnlock()
|
||||
|
||||
if job.handle == 0 {
|
||||
return ErrAlreadyClosed
|
||||
}
|
||||
|
||||
if err := attrList.Update(
|
||||
winapi.PROC_THREAD_ATTRIBUTE_JOB_LIST,
|
||||
unsafe.Pointer(&job.handle),
|
||||
unsafe.Sizeof(job.handle),
|
||||
); err != nil {
|
||||
return fmt.Errorf("failed to update proc thread attributes for job object: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the job object handle.
|
||||
func (job *JobObject) Close() error {
|
||||
job.handleLock.Lock()
|
||||
defer job.handleLock.Unlock()
|
||||
|
||||
if job.handle == 0 {
|
||||
return ErrAlreadyClosed
|
||||
}
|
||||
|
||||
if err := windows.Close(job.handle); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if job.mq != nil {
|
||||
job.mq.Close()
|
||||
}
|
||||
// Handles now invalid so if the map entry to receive notifications for this job still
|
||||
// exists remove it so we can stop receiving notifications.
|
||||
if _, ok := jobMap.Load(uintptr(job.handle)); ok {
|
||||
jobMap.Delete(uintptr(job.handle))
|
||||
}
|
||||
|
||||
job.handle = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// Assign assigns a process to the job object.
|
||||
func (job *JobObject) Assign(pid uint32) error {
|
||||
job.handleLock.RLock()
|
||||
defer job.handleLock.RUnlock()
|
||||
|
||||
if job.handle == 0 {
|
||||
return ErrAlreadyClosed
|
||||
}
|
||||
|
||||
if pid == 0 {
|
||||
return errors.New("invalid pid: 0")
|
||||
}
|
||||
hProc, err := windows.OpenProcess(winapi.PROCESS_ALL_ACCESS, true, pid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer windows.Close(hProc)
|
||||
return windows.AssignProcessToJobObject(job.handle, hProc)
|
||||
}
|
||||
|
||||
// Terminate terminates the job, essentially calls TerminateProcess on every process in the
|
||||
// job.
|
||||
func (job *JobObject) Terminate(exitCode uint32) error {
|
||||
job.handleLock.RLock()
|
||||
defer job.handleLock.RUnlock()
|
||||
if job.handle == 0 {
|
||||
return ErrAlreadyClosed
|
||||
}
|
||||
return windows.TerminateJobObject(job.handle, exitCode)
|
||||
}
|
||||
|
||||
// Pids returns all of the process IDs in the job object.
func (job *JobObject) Pids() ([]uint32, error) {
    job.handleLock.RLock()
    defer job.handleLock.RUnlock()

    if job.handle == 0 {
        return nil, ErrAlreadyClosed
    }

    info := winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST{}
    err := winapi.QueryInformationJobObject(
        job.handle,
        winapi.JobObjectBasicProcessIdList,
        unsafe.Pointer(&info),
        uint32(unsafe.Sizeof(info)),
        nil,
    )

    // This is either the case where there is only one process or no processes in
    // the job. Any other case will result in ERROR_MORE_DATA. Check if info.NumberOfProcessIdsInList
    // is 1 and just return this, otherwise return an empty slice.
    if err == nil {
        if info.NumberOfProcessIdsInList == 1 {
            return []uint32{uint32(info.ProcessIdList[0])}, nil
        }
        // Return empty slice instead of nil to play well with the caller of this.
        // Do not return an error if no processes are running inside the job.
        return []uint32{}, nil
    }

    if err != winapi.ERROR_MORE_DATA {
        return nil, fmt.Errorf("failed initial query for PIDs in job object: %w", err)
    }

    jobBasicProcessIDListSize := unsafe.Sizeof(info) + (unsafe.Sizeof(info.ProcessIdList[0]) * uintptr(info.NumberOfAssignedProcesses-1))
    buf := make([]byte, jobBasicProcessIDListSize)
    if err = winapi.QueryInformationJobObject(
        job.handle,
        winapi.JobObjectBasicProcessIdList,
        unsafe.Pointer(&buf[0]),
        uint32(len(buf)),
        nil,
    ); err != nil {
        return nil, fmt.Errorf("failed to query for PIDs in job object: %w", err)
    }

    bufInfo := (*winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST)(unsafe.Pointer(&buf[0]))
    pids := make([]uint32, bufInfo.NumberOfProcessIdsInList)
    for i, bufPid := range bufInfo.AllPids() {
        pids[i] = uint32(bufPid)
    }
    return pids, nil
}

// QueryMemoryStats gets the memory stats for the job object.
func (job *JobObject) QueryMemoryStats() (*winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION, error) {
    job.handleLock.RLock()
    defer job.handleLock.RUnlock()

    if job.handle == 0 {
        return nil, ErrAlreadyClosed
    }

    info := winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION{}
    if err := winapi.QueryInformationJobObject(
        job.handle,
        winapi.JobObjectMemoryUsageInformation,
        unsafe.Pointer(&info),
        uint32(unsafe.Sizeof(info)),
        nil,
    ); err != nil {
        return nil, fmt.Errorf("failed to query for job object memory stats: %w", err)
    }
    return &info, nil
}

// QueryProcessorStats gets the processor stats for the job object.
func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION, error) {
    job.handleLock.RLock()
    defer job.handleLock.RUnlock()

    if job.handle == 0 {
        return nil, ErrAlreadyClosed
    }

    info := winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION{}
    if err := winapi.QueryInformationJobObject(
        job.handle,
        winapi.JobObjectBasicAccountingInformation,
        unsafe.Pointer(&info),
        uint32(unsafe.Sizeof(info)),
        nil,
    ); err != nil {
        return nil, fmt.Errorf("failed to query for job object process stats: %w", err)
    }
    return &info, nil
}

// QueryStorageStats gets the storage (I/O) stats for the job object. This call will error
// if either `EnableIOTracking` wasn't set to true on creation of the job, or SetIOTracking()
// hasn't been called since creation of the job.
func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION, error) {
    job.handleLock.RLock()
    defer job.handleLock.RUnlock()

    if job.handle == 0 {
        return nil, ErrAlreadyClosed
    }

    info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{
        ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE,
    }
    if err := winapi.QueryInformationJobObject(
        job.handle,
        winapi.JobObjectIoAttribution,
        unsafe.Pointer(&info),
        uint32(unsafe.Sizeof(info)),
        nil,
    ); err != nil {
        return nil, fmt.Errorf("failed to query for job object storage stats: %w", err)
    }
    return &info, nil
}

// QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the
// private working set for every process running in the job.
func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {
    pids, err := job.Pids()
    if err != nil {
        return 0, err
    }

    openAndQueryWorkingSet := func(pid uint32) (uint64, error) {
        h, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pid)
        if err != nil {
            // Continue to the next PID if OpenProcess fails (doesn't return a valid
            // handle). This handles the case where one of the PIDs in the job exited
            // before we could open it.
            return 0, nil
        }
        defer func() {
            _ = windows.Close(h)
        }()
        // Check if the process is actually still running in the job. There's a small chance
        // that the process could have exited and had its pid re-used between grabbing the pids
        // in the job and opening the handle to it above.
        var inJob int32
        if err := winapi.IsProcessInJob(h, job.handle, &inJob); err != nil {
            // This shouldn't fail unless we have incorrect access rights, which we control
            // here, so it's probably best to error out if this failed.
            return 0, err
        }
        // Don't report stats for this process as it's not running in the job. This shouldn't be
        // an error condition though.
        if inJob == 0 {
            return 0, nil
        }

        var vmCounters winapi.VM_COUNTERS_EX2
        status := winapi.NtQueryInformationProcess(
            h,
            winapi.ProcessVmCounters,
            unsafe.Pointer(&vmCounters),
            uint32(unsafe.Sizeof(vmCounters)),
            nil,
        )
        if !winapi.NTSuccess(status) {
            return 0, fmt.Errorf("failed to query information for process: %w", winapi.RtlNtStatusToDosError(status))
        }
        return uint64(vmCounters.PrivateWorkingSetSize), nil
    }

    var jobWorkingSetSize uint64
    for _, pid := range pids {
        workingSet, err := openAndQueryWorkingSet(pid)
        if err != nil {
            return 0, err
        }
        jobWorkingSetSize += workingSet
    }

    return jobWorkingSetSize, nil
}

// SetIOTracking enables IO tracking for processes in the job object.
// This enables use of the QueryStorageStats method.
func (job *JobObject) SetIOTracking() error {
    job.handleLock.RLock()
    defer job.handleLock.RUnlock()

    if job.handle == 0 {
        return ErrAlreadyClosed
    }

    return enableIOTracking(job.handle)
}

func enableIOTracking(job windows.Handle) error {
    info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{
        ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE,
    }
    if _, err := windows.SetInformationJobObject(
        job,
        winapi.JobObjectIoAttribution,
        uintptr(unsafe.Pointer(&info)),
        uint32(unsafe.Sizeof(info)),
    ); err != nil {
        return fmt.Errorf("failed to enable IO tracking on job object: %w", err)
    }
    return nil
}

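Taken together, the two calls above imply an ordering: IO tracking must be enabled before storage stats can be queried. A hedged sketch of that flow; `job` is a hypothetical already-created JobObject, and the stat field names (`ReadStats.IoCount` etc.) follow the Windows JOBOBJECT_IO_ATTRIBUTION_STATS layout and are an assumption here:

```
// Sketch: enable IO tracking, then query the attributed IO counters.
// QueryStorageStats errors out if tracking was never enabled for this job.
if err := job.SetIOTracking(); err != nil {
    log.Fatalf("enable IO tracking: %v", err)
}
stats, err := job.QueryStorageStats()
if err != nil {
    log.Fatalf("query storage stats: %v", err)
}
fmt.Printf("reads: %d, writes: %d\n", stats.ReadStats.IoCount, stats.WriteStats.IoCount)
```
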
315 vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go generated vendored
@@ -1,315 +0,0 @@
package jobobject

import (
    "errors"
    "fmt"
    "unsafe"

    "github.com/Microsoft/hcsshim/internal/winapi"
    "golang.org/x/sys/windows"
)

const (
    memoryLimitMax uint64 = 0xffffffffffffffff
)

func isFlagSet(flag, controlFlags uint32) bool {
    return (flag & controlFlags) == flag
}

// SetResourceLimits sets resource limits on the job object (cpu, memory, storage).
func (job *JobObject) SetResourceLimits(limits *JobLimits) error {
    // Go through and check what limits were specified and apply them to the job.
    if limits.MemoryLimitInBytes != 0 {
        if err := job.SetMemoryLimit(limits.MemoryLimitInBytes); err != nil {
            return fmt.Errorf("failed to set job object memory limit: %w", err)
        }
    }

    if limits.CPULimit != 0 {
        if err := job.SetCPULimit(RateBased, limits.CPULimit); err != nil {
            return fmt.Errorf("failed to set job object cpu limit: %w", err)
        }
    } else if limits.CPUWeight != 0 {
        if err := job.SetCPULimit(WeightBased, limits.CPUWeight); err != nil {
            return fmt.Errorf("failed to set job object cpu limit: %w", err)
        }
    }

    if limits.MaxBandwidth != 0 || limits.MaxIOPS != 0 {
        if err := job.SetIOLimit(limits.MaxBandwidth, limits.MaxIOPS); err != nil {
            return fmt.Errorf("failed to set io limit on job object: %w", err)
        }
    }
    return nil
}

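SetResourceLimits dispatches on whichever JobLimits fields are non-zero, so a caller only sets what it needs; note that CPULimit takes precedence over CPUWeight. A hedged example, with field names taken from this file and a hypothetical `job`:

```
// Sketch: cap the job at 200 MiB of memory, a rate-based CPU hard cap,
// and an IOPS ceiling. CPULimit wins over CPUWeight if both are set.
limits := &JobLimits{
    MemoryLimitInBytes: 200 * 1024 * 1024,
    CPULimit:           5000, // hard cap; Windows expresses the rate in cycles per 10,000 (so 5000 ≈ 50%)
    MaxIOPS:            1000,
}
if err := job.SetResourceLimits(limits); err != nil {
    log.Fatalf("apply job limits: %v", err)
}
```
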
// SetTerminateOnLastHandleClose sets the job object flag that specifies that the job should terminate
// all processes in the job on the last open handle being closed.
func (job *JobObject) SetTerminateOnLastHandleClose() error {
    info, err := job.getExtendedInformation()
    if err != nil {
        return err
    }
    info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
    return job.setExtendedInformation(info)
}

// SetMemoryLimit sets the memory limit of the job object based on the given `memoryLimitInBytes`.
func (job *JobObject) SetMemoryLimit(memoryLimitInBytes uint64) error {
    if memoryLimitInBytes >= memoryLimitMax {
        return errors.New("memory limit specified exceeds the max size")
    }

    info, err := job.getExtendedInformation()
    if err != nil {
        return err
    }

    info.JobMemoryLimit = uintptr(memoryLimitInBytes)
    info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_JOB_MEMORY
    return job.setExtendedInformation(info)
}

// GetMemoryLimit gets the memory limit in bytes of the job object.
func (job *JobObject) GetMemoryLimit() (uint64, error) {
    info, err := job.getExtendedInformation()
    if err != nil {
        return 0, err
    }
    return uint64(info.JobMemoryLimit), nil
}

// SetCPULimit sets the CPU limit depending on the specified `CPURateControlType` to
// `rateControlValue` for the job object.
func (job *JobObject) SetCPULimit(rateControlType CPURateControlType, rateControlValue uint32) error {
    cpuInfo, err := job.getCPURateControlInformation()
    if err != nil {
        return err
    }
    switch rateControlType {
    case WeightBased:
        if rateControlValue < cpuWeightMin || rateControlValue > cpuWeightMax {
            return fmt.Errorf("processor weight value of `%d` is invalid", rateControlValue)
        }
        cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED
        cpuInfo.Value = rateControlValue
    case RateBased:
        if rateControlValue < cpuLimitMin || rateControlValue > cpuLimitMax {
            return fmt.Errorf("processor rate of `%d` is invalid", rateControlValue)
        }
        cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP
        cpuInfo.Value = rateControlValue
    default:
        return errors.New("invalid job object cpu rate control type")
    }
    return job.setCPURateControlInfo(cpuInfo)
}

// GetCPULimit gets the cpu limits for the job object.
// `rateControlType` is used to indicate what type of cpu limit to query for.
func (job *JobObject) GetCPULimit(rateControlType CPURateControlType) (uint32, error) {
    info, err := job.getCPURateControlInformation()
    if err != nil {
        return 0, err
    }

    if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE, info.ControlFlags) {
        return 0, errors.New("the job does not have cpu rate control enabled")
    }

    switch rateControlType {
    case WeightBased:
        if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED, info.ControlFlags) {
            return 0, errors.New("cannot get cpu weight for job object without cpu weight option set")
        }
    case RateBased:
        if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP, info.ControlFlags) {
            return 0, errors.New("cannot get cpu rate hard cap for job object without cpu rate hard cap option set")
        }
    default:
        return 0, errors.New("invalid job object cpu rate control type")
    }
    return info.Value, nil
}

// SetCPUAffinity sets the processor affinity for the job object.
// The affinity is passed in as a bitmask.
func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error {
    info, err := job.getExtendedInformation()
    if err != nil {
        return err
    }
    info.BasicLimitInformation.LimitFlags |= uint32(windows.JOB_OBJECT_LIMIT_AFFINITY)
    info.BasicLimitInformation.Affinity = uintptr(affinityBitMask)
    return job.setExtendedInformation(info)
}

// GetCPUAffinity gets the processor affinity for the job object.
// The returned affinity is a bitmask.
func (job *JobObject) GetCPUAffinity() (uint64, error) {
    info, err := job.getExtendedInformation()
    if err != nil {
        return 0, err
    }
    return uint64(info.BasicLimitInformation.Affinity), nil
}

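The affinity setters above treat the mask as one bit per logical processor. A brief hedged sketch (hypothetical `job`): pinning a job to CPUs 0 through 3 means setting the low four bits.

```
// Bit i of the mask selects logical CPU i, so CPUs 0-3 are 0b1111 = 0xF.
var mask uint64 = 0xF
if err := job.SetCPUAffinity(mask); err != nil {
    log.Fatalf("set affinity: %v", err)
}
got, err := job.GetCPUAffinity()
if err != nil {
    log.Fatalf("get affinity: %v", err)
}
fmt.Printf("affinity mask: %#x\n", got) // expect 0xf
```
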
// SetIOLimit sets the IO limits specified on the job object.
func (job *JobObject) SetIOLimit(maxBandwidth, maxIOPS int64) error {
    ioInfo, err := job.getIOLimit()
    if err != nil {
        return err
    }
    ioInfo.ControlFlags |= winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE
    if maxBandwidth != 0 {
        ioInfo.MaxBandwidth = maxBandwidth
    }
    if maxIOPS != 0 {
        ioInfo.MaxIops = maxIOPS
    }
    return job.setIORateControlInfo(ioInfo)
}

// GetIOMaxBandwidthLimit gets the max bandwidth for the job object.
func (job *JobObject) GetIOMaxBandwidthLimit() (int64, error) {
    info, err := job.getIOLimit()
    if err != nil {
        return 0, err
    }
    return info.MaxBandwidth, nil
}

// GetIOMaxIopsLimit gets the max iops for the job object.
func (job *JobObject) GetIOMaxIopsLimit() (int64, error) {
    info, err := job.getIOLimit()
    if err != nil {
        return 0, err
    }
    return info.MaxIops, nil
}

// Helper function for getting a job object's extended information.
func (job *JobObject) getExtendedInformation() (*windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION, error) {
    job.handleLock.RLock()
    defer job.handleLock.RUnlock()

    if job.handle == 0 {
        return nil, ErrAlreadyClosed
    }

    info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{}
    if err := winapi.QueryInformationJobObject(
        job.handle,
        windows.JobObjectExtendedLimitInformation,
        unsafe.Pointer(&info),
        uint32(unsafe.Sizeof(info)),
        nil,
    ); err != nil {
        return nil, fmt.Errorf("query %v returned error: %w", info, err)
    }
    return &info, nil
}

// Helper function for getting a job object's CPU rate control information.
func (job *JobObject) getCPURateControlInformation() (*winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION, error) {
    job.handleLock.RLock()
    defer job.handleLock.RUnlock()

    if job.handle == 0 {
        return nil, ErrAlreadyClosed
    }

    info := winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION{}
    if err := winapi.QueryInformationJobObject(
        job.handle,
        windows.JobObjectCpuRateControlInformation,
        unsafe.Pointer(&info),
        uint32(unsafe.Sizeof(info)),
        nil,
    ); err != nil {
        return nil, fmt.Errorf("query %v returned error: %w", info, err)
    }
    return &info, nil
}

// Helper function for setting a job object's extended information.
func (job *JobObject) setExtendedInformation(info *windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION) error {
    job.handleLock.RLock()
    defer job.handleLock.RUnlock()

    if job.handle == 0 {
        return ErrAlreadyClosed
    }

    if _, err := windows.SetInformationJobObject(
        job.handle,
        windows.JobObjectExtendedLimitInformation,
        uintptr(unsafe.Pointer(info)),
        uint32(unsafe.Sizeof(*info)),
    ); err != nil {
        return fmt.Errorf("failed to set Extended info %v on job object: %w", info, err)
    }
    return nil
}

// Helper function for querying the job handle for IO limit information.
func (job *JobObject) getIOLimit() (*winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION, error) {
    job.handleLock.RLock()
    defer job.handleLock.RUnlock()

    if job.handle == 0 {
        return nil, ErrAlreadyClosed
    }

    ioInfo := &winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION{}
    var blockCount uint32 = 1

    if _, err := winapi.QueryIoRateControlInformationJobObject(
        job.handle,
        nil,
        &ioInfo,
        &blockCount,
    ); err != nil {
        return nil, fmt.Errorf("query %v returned error: %w", ioInfo, err)
    }

    if !isFlagSet(winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE, ioInfo.ControlFlags) {
        return nil, fmt.Errorf("query %v cannot get IO limits for job object without IO rate control option set", ioInfo)
    }
    return ioInfo, nil
}

// Helper function for setting a job object's IO rate control information.
func (job *JobObject) setIORateControlInfo(ioInfo *winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION) error {
    job.handleLock.RLock()
    defer job.handleLock.RUnlock()

    if job.handle == 0 {
        return ErrAlreadyClosed
    }

    if _, err := winapi.SetIoRateControlInformationJobObject(job.handle, ioInfo); err != nil {
        return fmt.Errorf("failed to set IO limit info %v on job object: %w", ioInfo, err)
    }
    return nil
}

// Helper function for setting a job object's CPU rate control information.
func (job *JobObject) setCPURateControlInfo(cpuInfo *winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION) error {
    job.handleLock.RLock()
    defer job.handleLock.RUnlock()

    if job.handle == 0 {
        return ErrAlreadyClosed
    }
    if _, err := windows.SetInformationJobObject(
        job.handle,
        windows.JobObjectCpuRateControlInformation,
        uintptr(unsafe.Pointer(cpuInfo)),
        uint32(unsafe.Sizeof(cpuInfo)),
    ); err != nil {
        return fmt.Errorf("failed to set cpu limit info %v on job object: %w", cpuInfo, err)
    }
    return nil
}

92 vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go generated vendored
@@ -1,92 +0,0 @@
package queue

import (
    "errors"
    "sync"
)

var ErrQueueClosed = errors.New("the queue is closed for reading and writing")

// MessageQueue represents a threadsafe message queue to be used to retrieve or
// write messages to.
type MessageQueue struct {
    m        *sync.RWMutex
    c        *sync.Cond
    messages []interface{}
    closed   bool
}

// NewMessageQueue returns a new MessageQueue.
func NewMessageQueue() *MessageQueue {
    m := &sync.RWMutex{}
    return &MessageQueue{
        m:        m,
        c:        sync.NewCond(m),
        messages: []interface{}{},
    }
}

// Enqueue writes `msg` to the queue.
func (mq *MessageQueue) Enqueue(msg interface{}) error {
    mq.m.Lock()
    defer mq.m.Unlock()

    if mq.closed {
        return ErrQueueClosed
    }
    mq.messages = append(mq.messages, msg)
    // Signal a waiter that there is now a value available in the queue.
    mq.c.Signal()
    return nil
}

// Dequeue will read a value from the queue and remove it. If the queue
// is empty, this will block until the queue is closed or a value gets enqueued.
func (mq *MessageQueue) Dequeue() (interface{}, error) {
    mq.m.Lock()
    defer mq.m.Unlock()

    for !mq.closed && mq.size() == 0 {
        mq.c.Wait()
    }

    // We got woken up, check if it's because the queue got closed.
    if mq.closed {
        return nil, ErrQueueClosed
    }

    val := mq.messages[0]
    mq.messages[0] = nil
    mq.messages = mq.messages[1:]
    return val, nil
}

// Size returns the size of the queue.
func (mq *MessageQueue) Size() int {
    mq.m.RLock()
    defer mq.m.RUnlock()
    return mq.size()
}

// Non-exported size check, used inside functions that already hold the lock.
func (mq *MessageQueue) size() int {
    return len(mq.messages)
}

// Close closes the queue for future writes or reads. Any attempts to read or write from the
// queue after close will return ErrQueueClosed. This is safe to call multiple times.
func (mq *MessageQueue) Close() {
    mq.m.Lock()
    defer mq.m.Unlock()

    // Already closed, noop
    if mq.closed {
        return
    }

    mq.messages = nil
    mq.closed = true
    // If there's anybody currently waiting on a value from Dequeue, we need to
    // broadcast so the read(s) can return ErrQueueClosed.
    mq.c.Broadcast()
}

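The queue above is a classic mutex-plus-condition-variable design: Enqueue signals one waiter, Close broadcasts to all. A hedged usage sketch of the intended producer/consumer flow (the package is internal to hcsshim, so this is illustrative rather than importable):

```
// Consumer: Dequeue blocks until a message arrives or the queue is closed.
mq := queue.NewMessageQueue()
go func() {
    for {
        msg, err := mq.Dequeue()
        if err != nil {
            return // ErrQueueClosed: the producer is done.
        }
        fmt.Println("got:", msg)
    }
}()

// Producer: Enqueue never blocks; Close wakes any blocked consumers.
_ = mq.Enqueue("hello")
_ = mq.Enqueue(42)
mq.Close()
```
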
3 vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go generated vendored Normal file
@@ -0,0 +1,3 @@
package winapi

//sys GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error)
11 vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go generated vendored
@@ -24,10 +24,7 @@ const (
// Access rights for creating or opening job objects.
//
// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights
const (
    JOB_OBJECT_QUERY      = 0x0004
    JOB_OBJECT_ALL_ACCESS = 0x1F001F
)
const JOB_OBJECT_ALL_ACCESS = 0x1F001F

// IO limit flags
//
@@ -96,7 +93,7 @@ type JOBOBJECT_BASIC_PROCESS_ID_LIST struct {

// AllPids returns all the process Ids in the job object.
func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr {
    return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList:p.NumberOfProcessIdsInList]
    return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList]
}

// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information
@@ -165,7 +162,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
// PBOOL Result
// );
//
//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) = kernel32.IsProcessInJob
//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) = kernel32.IsProcessInJob

// BOOL QueryInformationJobObject(
// HANDLE hJob,
@@ -175,7 +172,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
// LPDWORD lpReturnLength
// );
//
//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject

// HANDLE OpenJobObjectW(
// DWORD dwDesiredAccess,

57 vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go generated vendored
@@ -6,60 +6,3 @@ const (
    PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016
    PROC_THREAD_ATTRIBUTE_JOB_LIST      = 0x2000D
)

// ProcessVmCounters corresponds to the _VM_COUNTERS_EX and _VM_COUNTERS_EX2 structures.
const ProcessVmCounters = 3

// __kernel_entry NTSTATUS NtQueryInformationProcess(
// [in]            HANDLE           ProcessHandle,
// [in]            PROCESSINFOCLASS ProcessInformationClass,
// [out]           PVOID            ProcessInformation,
// [in]            ULONG            ProcessInformationLength,
// [out, optional] PULONG           ReturnLength
// );
//
//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess

// typedef struct _VM_COUNTERS_EX
// {
//	SIZE_T PeakVirtualSize;
//	SIZE_T VirtualSize;
//	ULONG PageFaultCount;
//	SIZE_T PeakWorkingSetSize;
//	SIZE_T WorkingSetSize;
//	SIZE_T QuotaPeakPagedPoolUsage;
//	SIZE_T QuotaPagedPoolUsage;
//	SIZE_T QuotaPeakNonPagedPoolUsage;
//	SIZE_T QuotaNonPagedPoolUsage;
//	SIZE_T PagefileUsage;
//	SIZE_T PeakPagefileUsage;
//	SIZE_T PrivateUsage;
// } VM_COUNTERS_EX, *PVM_COUNTERS_EX;
//
type VM_COUNTERS_EX struct {
    PeakVirtualSize            uintptr
    VirtualSize                uintptr
    PageFaultCount             uint32
    PeakWorkingSetSize         uintptr
    WorkingSetSize             uintptr
    QuotaPeakPagedPoolUsage    uintptr
    QuotaPagedPoolUsage        uintptr
    QuotaPeakNonPagedPoolUsage uintptr
    QuotaNonPagedPoolUsage     uintptr
    PagefileUsage              uintptr
    PeakPagefileUsage          uintptr
    PrivateUsage               uintptr
}

// typedef struct _VM_COUNTERS_EX2
// {
//	VM_COUNTERS_EX CountersEx;
//	SIZE_T PrivateWorkingSetSize;
//	SIZE_T SharedCommitUsage;
// } VM_COUNTERS_EX2, *PVM_COUNTERS_EX2;
//
type VM_COUNTERS_EX2 struct {
    CountersEx            VM_COUNTERS_EX
    PrivateWorkingSetSize uintptr
    SharedCommitUsage     uintptr
}

3 vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go generated vendored
@@ -12,8 +12,7 @@ const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004
// ULONG SystemInformationLength,
// PULONG ReturnLength
// );
//
//sys NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation
//sys NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation

type SYSTEM_PROCESS_INFORMATION struct {
    NextEntryOffset uint32 // ULONG

2 vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go generated vendored
@@ -2,4 +2,4 @@
// be thought of as an extension to golang.org/x/sys/windows.
package winapi

//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go user.go console.go system.go net.go path.go thread.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go console.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go

26 vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go generated vendored
@@ -50,6 +50,7 @@ var (
    procSetJobCompartmentId         = modiphlpapi.NewProc("SetJobCompartmentId")
    procSearchPathW                 = modkernel32.NewProc("SearchPathW")
    procCreateRemoteThread          = modkernel32.NewProc("CreateRemoteThread")
    procGetQueuedCompletionStatus   = modkernel32.NewProc("GetQueuedCompletionStatus")
    procIsProcessInJob              = modkernel32.NewProc("IsProcessInJob")
    procQueryInformationJobObject   = modkernel32.NewProc("QueryInformationJobObject")
    procOpenJobObjectW              = modkernel32.NewProc("OpenJobObjectW")
@@ -60,7 +61,6 @@ var (
    procLogonUserW                  = modadvapi32.NewProc("LogonUserW")
    procLocalAlloc                  = modkernel32.NewProc("LocalAlloc")
    procLocalFree                   = modkernel32.NewProc("LocalFree")
    procNtQueryInformationProcess   = modntdll.NewProc("NtQueryInformationProcess")
    procGetActiveProcessorCount     = modkernel32.NewProc("GetActiveProcessorCount")
    procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
    procCM_Get_Device_ID_ListA      = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
@@ -100,7 +100,7 @@ func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) {
    return
}

func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) {
func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) {
    r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
    status = uint32(r0)
    return
@@ -140,7 +140,19 @@ func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes,
    return
}

func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) {
func GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) {
    r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0)
    if r1 == 0 {
        if e1 != 0 {
            err = errnoErr(e1)
        } else {
            err = syscall.EINVAL
        }
    }
    return
}

func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) {
    r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result)))
    if r1 == 0 {
        if e1 != 0 {
@@ -152,7 +164,7 @@ func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result
    return
}

func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
    r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0)
    if r1 == 0 {
        if e1 != 0 {
@@ -244,12 +256,6 @@ func LocalFree(ptr uintptr) {
    return
}

func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) {
    r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0)
    status = uint32(r0)
    return
}

func GetActiveProcessorCount(groupNumber uint16) (amount uint32) {
    r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
    amount = uint32(r0)

20 vendor/github.com/beorn7/perks/LICENSE generated vendored Normal file
@@ -0,0 +1,20 @@
Copyright (C) 2013 Blake Mizerany

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

2388 vendor/github.com/beorn7/perks/quantile/exampledata.txt generated vendored Normal file
File diff suppressed because it is too large
316 vendor/github.com/beorn7/perks/quantile/stream.go generated vendored Normal file
@@ -0,0 +1,316 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile

import (
    "math"
    "sort"
)

// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
    Value float64 `json:",string"`
    Width float64 `json:",string"`
    Delta float64 `json:",string"`
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int           { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

type invariant func(s *stream, r float64) float64

// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
    ƒ := func(s *stream, r float64) float64 {
        return 2 * epsilon * r
    }
    return newStream(ƒ)
}

// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
    ƒ := func(s *stream, r float64) float64 {
        return 2 * epsilon * (s.n - r)
    }
    return newStream(ƒ)
}

// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targetMap map[float64]float64) *Stream {
    // Convert map to slice to avoid slow iterations on a map.
    // ƒ is called on the hot path, so converting the map to a slice
    // beforehand results in significant CPU savings.
    targets := targetMapToSlice(targetMap)

    ƒ := func(s *stream, r float64) float64 {
        var m = math.MaxFloat64
        var f float64
        for _, t := range targets {
            if t.quantile*s.n <= r {
                f = (2 * t.epsilon * r) / t.quantile
            } else {
                f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
            }
            if f < m {
                m = f
            }
        }
        return m
    }
    return newStream(ƒ)
}

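To make the NewTargeted contract concrete, here is a short hedged example: track the median and the 99th percentile with different absolute error budgets, then query them. It assumes imports of `fmt`, `math/rand`, and `github.com/beorn7/perks/quantile`.

```
// A targeted stream trades generality for memory, so the quantiles
// (and their absolute epsilons) are declared up front.
q := quantile.NewTargeted(map[float64]float64{
    0.50: 0.005, // median; true quantile of the answer lies in [0.495, 0.505]
    0.99: 0.001, // p99 with a tighter error budget
})
for i := 0; i < 10000; i++ {
    q.Insert(rand.Float64())
}
fmt.Println("p50 ≈", q.Query(0.50))
fmt.Println("p99 ≈", q.Query(0.99))
```
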
type target struct {
    quantile float64
    epsilon  float64
}

func targetMapToSlice(targetMap map[float64]float64) []target {
    targets := make([]target, 0, len(targetMap))

    for quantile, epsilon := range targetMap {
        t := target{
            quantile: quantile,
            epsilon:  epsilon,
        }
        targets = append(targets, t)
    }

    return targets
}

// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
    *stream
    b      Samples
    sorted bool
}

func newStream(ƒ invariant) *Stream {
    x := &stream{ƒ: ƒ}
    return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
    s.insert(Sample{Value: v, Width: 1})
}

func (s *Stream) insert(sample Sample) {
    s.b = append(s.b, sample)
    s.sorted = false
    if len(s.b) == cap(s.b) {
        s.flush()
    }
}

// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
    if !s.flushed() {
        // Fast path when there hasn't been enough data for a flush;
        // this also yields better accuracy for small sets of data.
        l := len(s.b)
        if l == 0 {
            return 0
        }
        i := int(math.Ceil(float64(l) * q))
        if i > 0 {
            i -= 1
        }
        s.maybeSort()
        return s.b[i].Value
    }
    s.flush()
    return s.stream.query(q)
}

// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
    sort.Sort(samples)
    s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
    s.stream.reset()
    s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
    if !s.flushed() {
        return s.b
    }
    s.flush()
    return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
    return len(s.b) + s.stream.count()
}

func (s *Stream) flush() {
    s.maybeSort()
    s.stream.merge(s.b)
    s.b = s.b[:0]
}

func (s *Stream) maybeSort() {
    if !s.sorted {
        s.sorted = true
        sort.Sort(s.b)
    }
}

func (s *Stream) flushed() bool {
    return len(s.stream.l) > 0
}

type stream struct {
    n float64
    l []Sample
    ƒ invariant
}

func (s *stream) reset() {
    s.l = s.l[:0]
    s.n = 0
}

func (s *stream) insert(v float64) {
    s.merge(Samples{{v, 1, 0}})
}

func (s *stream) merge(samples Samples) {
    // TODO(beorn7): This tries to merge not only individual samples, but
    // whole summaries. The paper doesn't mention merging summaries at
    // all. Unittests show that the merging is inaccurate. Find out how to
    // do merges properly.
    var r float64
    i := 0
    for _, sample := range samples {
        for ; i < len(s.l); i++ {
            c := s.l[i]
            if c.Value > sample.Value {
                // Insert at position i.
                s.l = append(s.l, Sample{})
                copy(s.l[i+1:], s.l[i:])
                s.l[i] = Sample{
                    sample.Value,
                    sample.Width,
                    math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
                    // TODO(beorn7): How to calculate delta correctly?
                }
                i++
                goto inserted
            }
            r += c.Width
        }
        s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
        i++
    inserted:
        s.n += sample.Width
        r += sample.Width
    }
    s.compress()
}

func (s *stream) count() int {
    return int(s.n)
}

func (s *stream) query(q float64) float64 {
    t := math.Ceil(q * s.n)
    t += math.Ceil(s.ƒ(s, t) / 2)
    p := s.l[0]
    var r float64
    for _, c := range s.l[1:] {
        r += p.Width
        if r+c.Width+c.Delta > t {
            return p.Value
        }
        p = c
    }
    return p.Value
}

func (s *stream) compress() {
    if len(s.l) < 2 {
        return
    }
    x := s.l[len(s.l)-1]
    xi := len(s.l) - 1
    r := s.n - 1 - x.Width

    for i := len(s.l) - 2; i >= 0; i-- {
        c := s.l[i]
        if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
            x.Width += c.Width
            s.l[xi] = x
            // Remove element at i.
            copy(s.l[i:], s.l[i+1:])
            s.l = s.l[:len(s.l)-1]
            xi -= 1
        } else {
            x = c
            xi = i
        }
        r -= c.Width
    }
}

func (s *stream) samples() Samples {
    samples := make(Samples, len(s.l))
    copy(samples, s.l)
    return samples
}

22 vendor/github.com/cespare/xxhash/v2/LICENSE.txt generated vendored Normal file
@@ -0,0 +1,22 @@
Copyright (c) 2016 Caleb Spare

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

69 vendor/github.com/cespare/xxhash/v2/README.md generated vendored Normal file
@@ -0,0 +1,69 @@
# xxhash

[Go Reference](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[Test workflow](https://github.com/cespare/xxhash/actions/workflows/test.yml)

xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.

This package provides a straightforward API:

```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```

The `Digest` type implements hash.Hash64. Its key methods are:

```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```

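A short example of the API quoted above, added here for illustration: hashing the same input one-shot and via a streamed Digest yields the same 64-bit value.

```
package main

import (
    "fmt"

    "github.com/cespare/xxhash/v2"
)

func main() {
    data := []byte("hello, xxhash")

    // One-shot hashing of a byte slice.
    fmt.Printf("%016x\n", xxhash.Sum64(data))

    // Streaming through a Digest (hash.Hash64); Write never fails here.
    d := xxhash.New()
    _, _ = d.Write(data)
    fmt.Printf("%016x\n", d.Sum64())
}
```
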
This implementation provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.

## Compatibility

This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:

* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later

I recommend using the latest release of Go.

## Benchmarks

Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.

| input size | purego | asm |
| --- | --- | --- |
| 5 B | 979.66 MB/s | 1291.17 MB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |

These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
the following commands under Go 1.11.2:

```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
```

## Projects using this package

- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
Some files were not shown because too many files have changed in this diff.