mirror of https://github.com/containers/skopeo.git (synced 2026-01-30 13:58:48 +00:00)

Compare commits: release-1.… ... v1.4.0 (138 commits)
| SHA1 |
|---|
| a44da449d3 |
| 788b2e2dd3 |
| 2135466ba3 |
| 3d9340c836 |
| 961d5da7ce |
| 920f0b2414 |
| fb03e033cc |
| caf1469b1d |
| d70ea89050 |
| a8f0c90206 |
| ce6035b738 |
| b6b7bd9250 |
| c27d9063e5 |
| 3a8d3cb566 |
| aeb61f656c |
| 76eb9bc9e9 |
| a1f9318e7b |
| 64dc748e5e |
| d82c662101 |
| 24a75c9608 |
| f0c49b5ccc |
| bef3b0c997 |
| 5e5506646d |
| 76bfc7f07f |
| 726d982ceb |
| bb447f2f1e |
| 2a98df6b12 |
| a6cf2f4293 |
| bd309aed2a |
| 285a5cb6a0 |
| 3c2d98875d |
| 02bacf571d |
| ae0595c56a |
| b0ebbdd501 |
| ec73ff3d91 |
| ce2f64c946 |
| e460b9aa8c |
| 643920b373 |
| 598f9e7ce3 |
| ee05486383 |
| 2476e99cb1 |
| 074cfda358 |
| cec7aa68f7 |
| dc1cf646e0 |
| 76103a6c2d |
| 990908bf80 |
| a6e745dad5 |
| ede29c9168 |
| 75f0183edc |
| 7ace4265fb |
| 3d4fb09f2c |
| 92ad5eddcc |
| 4efeb71e28 |
| 392c6fce02 |
| a0ce542193 |
| 0035a9aecb |
| f80bf8a39f |
| 0fac3f10d3 |
| c39b3dc266 |
| 07c81c7777 |
| 8eaf0329f8 |
| 378e6694c7 |
| aeb75f3857 |
| 2286a58a39 |
| 83603a79d4 |
| 37b24aedd7 |
| 6d6c8b5609 |
| 99621f4168 |
| 09282bcf88 |
| 09ca3ba47f |
| 22908fb3e8 |
| a37251289a |
| e4d1392085 |
| 71e7a5839e |
| 316503341b |
| e716b2fa66 |
| 97eaace7db |
| 846ea33b40 |
| 30c0eb03f0 |
| 7cb70f4e9c |
| 5918513ed5 |
| b768f4e3af |
| b20c2d45f1 |
| fc3678038e |
| d0f7339b77 |
| af550fda48 |
| 012ed6610e |
| f7aab1aba5 |
| c30b904cbe |
| 45028801eb |
| 9fbb9abc6d |
| 69fd1d4be0 |
| 4417dc4402 |
| 8f0ae5bde6 |
| 93b819a766 |
| ce06c87817 |
| e7c5e9f7e6 |
| 8a1214a07b |
| 1eac38e3ce |
| 5000f745b0 |
| b1e78efaa2 |
| ccdaf6e0f2 |
| d25476e4f7 |
| 298f7476d0 |
| 2fee990acc |
| 6ba1affd23 |
| 5778d9bd67 |
| df17004709 |
| ad4ec8b496 |
| 5f8ec87c54 |
| abdc4a7e42 |
| 513a524d7d |
| d4a500069e |
| bcc18ebfb7 |
| 9b9ef675c1 |
| dde3e759f6 |
| 622faa0b8a |
| 9a5f009ea2 |
| 865407cad0 |
| ec13aa6d87 |
| 780de354d4 |
| 10c4c877ba |
| e32f3f1792 |
| a07f1e0f89 |
| a2c8022a21 |
| b9661b2a05 |
| 761100143a |
| a0b6ea288d |
| e5cb7ce196 |
| c806083830 |
| 714ffe1b60 |
| cac3f2b140 |
| 8efffce8be |
| efc789be55 |
| 6452a9b6f6 |
| 184f0eee58 |
| 5af5f8a0e7 |
| 65ed9920da |
.cirrus.yml (62)

@@ -6,7 +6,7 @@ env:
#### Global variables used for all tasks
####
# Name of the ultimate destination branch for this CI run, PR or post-merge.
DEST_BRANCH: "master"
DEST_BRANCH: "main"
# Overrides default location (/tmp/cirrus) for repo clone
GOPATH: &gopath "/var/tmp/go"
GOBIN: "${GOPATH}/bin"
@@ -29,7 +29,7 @@ env:
PRIOR_UBUNTU_NAME: "ubuntu-2010"

# Google-cloud VM Images
IMAGE_SUFFIX: "c6032583541653504"
IMAGE_SUFFIX: "c6248193773010944"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
@@ -46,7 +46,7 @@ env:

# Default timeout for each task
timeout_in: 30m
timeout_in: 45m

gcp_credentials: ENCRYPTED[52d9e807b531b37ab14e958cb5a72499460663f04c8d73e22ad608c027a31118420f1c80f0be0882fbdf96f49d8f9ac0]
@@ -61,14 +61,37 @@ validate_task:
image: "${SKOPEO_CI_CONTAINER_FQIN}"
cpu: 4
memory: 8
script: make validate-local
script: |
make validate-local
make vendor && hack/tree_status.sh

doccheck_task:
only_if: $CIRRUS_PR != ''
depends_on:
- validate
container:
image: "${FEDORA_CONTAINER_FQIN}"
cpu: 4
memory: 8
env:
BUILDTAGS: &withopengpg 'btrfs_noversion libdm_no_deferred_remove containers_image_openpgp'
script: |
# TODO: Can't use 'runner.sh setup' inside container. However,
# removing the pre-installed package is the only necessary step
# at the time of this comment.
dnf erase -y skopeo # Guarantee non-interference
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" build
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" doccheck

cross_task:
osx_task:
only_if: &not_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
depends_on:
- validate
macos_instance:
image: catalina-xcode
setup_script: |
export PATH=$GOPATH/bin:$PATH
brew update
brew install gpgme go go-md2man
go get -u golang.org/x/lint/golint
test_script: |
@@ -80,6 +103,28 @@ cross_task:
/usr/local/bin/skopeo -v


cross_task:
alias: cross
only_if: *not_docs
depends_on:
- validate
gce_instance:
image_project: libpod-218412
zone: "us-central1-f"
cpu: 2
memory: "4Gb"
# Required to be 200gig, do not modify - has i/o performance impact
# according to gcloud CLI tool warning messages.
disk: 200
image_name: ${FEDORA_CACHE_IMAGE_NAME}
env:
BUILDTAGS: *withopengpg
setup_script: >-
"${GOSRC}/${SCRIPT_BASE}/runner.sh" setup
cross_script: >-
"${GOSRC}/${SCRIPT_BASE}/runner.sh" cross


#####
##### NOTE: This task is subtantially duplicated in the containers/image
##### repository's `.cirrus.yml`. Changes made here should be fully merged
@@ -87,6 +132,7 @@ cross_task:
#####
test_skopeo_task:
alias: test_skopeo
only_if: *not_docs
depends_on:
- validate
gce_instance:
@@ -104,15 +150,13 @@ test_skopeo_task:
BUILDTAGS: 'btrfs_noversion libdm_no_deferred_remove'
- name: "Skopeo Test w/ opengpg"
env:
BUILDTAGS: 'btrfs_noversion libdm_no_deferred_remove containers_image_openpgp'
BUILDTAGS: *withopengpg
setup_script: >-
"${GOSRC}/${SCRIPT_BASE}/runner.sh" setup
vendor_script: >-
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" vendor
build_script: >-
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" build
validate_script: >-
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" validate
unit_script: >-
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" unit
integration_script: >-
@@ -156,6 +200,8 @@ success_task:
# N/B: ALL tasks must be listed here, minus their '_task' suffix.
depends_on:
- validate
- doccheck
- osx
- cross
- test_skopeo
- meta
.github/workflows/multi-arch-build.yaml (209, vendored, new file)

@@ -0,0 +1,209 @@
---

# Please see contrib/<reponame>image/README.md for details on the intentions
# of this workflow.
#
# BIG FAT WARNING: This workflow is duplicated across containers/skopeo,
# containers/buildah, and containers/podman. ANY AND
# ALL CHANGES MADE HERE MUST BE MANUALLY DUPLICATED
# TO THE OTHER REPOS.

name: build multi-arch images

on:
# Upstream tends to be very active, with many merges per day.
# Only run this daily via cron schedule, or manually, not by branch push.
schedule:
- cron: '0 8 * * *'
# allows to run this workflow manually from the Actions tab
workflow_dispatch:

jobs:
multi:
name: multi-arch image build
env:
REPONAME: skopeo # No easy way to parse this out of $GITHUB_REPOSITORY
# Server/namespace value used to format FQIN
REPONAME_QUAY_REGISTRY: quay.io/skopeo
CONTAINERS_QUAY_REGISTRY: quay.io/containers
# list of architectures for build
PLATFORMS: linux/amd64,linux/s390x,linux/ppc64le,linux/arm64
# Command to execute in container to obtain project version number
VERSION_CMD: "--version" # skopeo is the entrypoint

# build several images (upstream, testing, stable) in parallel
strategy:
# By default, failure of one matrix item cancels all others
fail-fast: false
matrix:
# Builds are located under contrib/<reponame>image/<source> directory
source:
- upstream
- testing
- stable
runs-on: ubuntu-latest
# internal registry caches build for inspection before push
services:
registry:
image: quay.io/libpod/registry:2
ports:
- 5000:5000
steps:
- name: Checkout
uses: actions/checkout@v2

- name: Set up QEMU
uses: docker/setup-qemu-action@v1

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
driver-opts: network=host
install: true

- name: Build and locally push image
uses: docker/build-push-action@v2
with:
context: contrib/${{ env.REPONAME }}image/${{ matrix.source }}
file: ./contrib/${{ env.REPONAME }}image/${{ matrix.source }}/Dockerfile
platforms: ${{ env.PLATFORMS }}
push: true
tags: localhost:5000/${{ env.REPONAME }}/${{ matrix.source }}

# Simple verification that stable images work, and
# also grab version number use in forming the FQIN.
- name: amd64 container sniff test
if: matrix.source == 'stable'
id: sniff_test
run: |
podman pull --tls-verify=false \
localhost:5000/$REPONAME/${{ matrix.source }}
VERSION_OUTPUT=$(podman run \
localhost:5000/$REPONAME/${{ matrix.source }} \
$VERSION_CMD)
echo "$VERSION_OUTPUT"
VERSION=$(awk -r -e "/^${REPONAME} version /"'{print $3}' <<<"$VERSION_OUTPUT")
test -n "$VERSION"
echo "::set-output name=version::$VERSION"

- name: Generate image FQIN(s) to push
id: reponame_reg
run: |
if [[ "${{ matrix.source }}" == 'stable' ]]; then
# The command version in image just built
VERSION='v${{ steps.sniff_test.outputs.version }}'
# workaround vim syntax-highlight bug: '
# Push both new|updated version-tag and latest-tag FQINs
FQIN="$REPONAME_QUAY_REGISTRY/stable:$VERSION,$REPONAME_QUAY_REGISTRY/stable:latest"
elif [[ "${{ matrix.source }}" == 'testing' ]]; then
# Assume some contents changed, always push latest testing.
FQIN="$REPONAME_QUAY_REGISTRY/testing:latest"
elif [[ "${{ matrix.source }}" == 'upstream' ]]; then
# Assume some contents changed, always push latest upstream.
FQIN="$REPONAME_QUAY_REGISTRY/upstream:latest"
else
echo "::error::Unknown matrix item '${{ matrix.source }}'"
exit 1
fi
echo "::warning::Pushing $FQIN"
echo "::set-output name=fqin::${FQIN}"
echo '::set-output name=push::true'

# This is substantially similar to the above logic,
# but only handles $CONTAINERS_QUAY_REGISTRY for
# the stable "latest" and named-version tagged images.
- name: Generate containers reg. image FQIN(s)
if: matrix.source == 'stable'
id: containers_reg
run: |
VERSION='v${{ steps.sniff_test.outputs.version }}'
# workaround vim syntax-highlight bug: '
# Push both new|updated version-tag and latest-tag FQINs
FQIN="$CONTAINERS_QUAY_REGISTRY/$REPONAME:$VERSION,$CONTAINERS_QUAY_REGISTRY/$REPONAME:latest"
echo "::warning::Pushing $FQIN"
echo "::set-output name=fqin::${FQIN}"
echo '::set-output name=push::true'

- name: Define LABELS multi-line env. var. value
run: |
# This is a really hacky/strange workflow idiom, required
# for setting multi-line $LABELS value for consumption in
# a future step. There is literally no cleaner way to do this :<
# https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#multiline-strings
function set_labels() {
echo 'LABELS<<DELIMITER' >> "$GITHUB_ENV"
for line; do
echo "$line" | tee -a "$GITHUB_ENV"
done
echo "DELIMITER" >> "$GITHUB_ENV"
}

declare -a lines
lines=(\
"org.opencontainers.image.source=https://github.com/${GITHUB_REPOSITORY}.git"
"org.opencontainers.image.revision=${GITHUB_SHA}"
"org.opencontainers.image.created=$(date -u --iso-8601=seconds)"
)

# Only the 'stable' matrix source obtains $VERSION
if [[ "${{ matrix.source }}" == "stable" ]]; then
lines+=(\
"org.opencontainers.image.version=${{ steps.sniff_test.outputs.version }}"
)
fi

set_labels "${lines[@]}"

# Separate steps to login and push for $REPONAME_QUAY_REGISTRY and
# $CONTAINERS_QUAY_REGISTRY are required, because 2 sets of credentials
# are used and namespaced within the registry. At the same time, reuse
# of non-shell steps is not supported by Github Actions nor are YAML
# anchors/aliases, nor composite actions.

# Push to $REPONAME_QUAY_REGISTRY for stable, testing. and upstream
- name: Login to ${{ env.REPONAME_QUAY_REGISTRY }}
uses: docker/login-action@v1
if: steps.reponame_reg.outputs.push == 'true'
with:
registry: ${{ env.REPONAME_QUAY_REGISTRY }}
# N/B: Secrets are not passed to workflows that are triggered
# by a pull request from a fork
username: ${{ secrets.REPONAME_QUAY_USERNAME }}
password: ${{ secrets.REPONAME_QUAY_PASSWORD }}

- name: Push images to ${{ steps.reponame_reg.outputs.fqin }}
uses: docker/build-push-action@v2
if: steps.reponame_reg.outputs.push == 'true'
with:
cache-from: type=registry,ref=localhost:5000/${{ env.REPONAME }}/${{ matrix.source }}
cache-to: type=inline
context: contrib/${{ env.REPONAME }}image/${{ matrix.source }}
file: ./contrib/${{ env.REPONAME }}image/${{ matrix.source }}/Dockerfile
platforms: ${{ env.PLATFORMS }}
push: true
tags: ${{ steps.reponame_reg.outputs.fqin }}
labels: |
${{ env.LABELS }}

# Push to $CONTAINERS_QUAY_REGISTRY only stable
- name: Login to ${{ env.CONTAINERS_QUAY_REGISTRY }}
if: steps.containers_reg.outputs.push == 'true'
uses: docker/login-action@v1
with:
registry: ${{ env.CONTAINERS_QUAY_REGISTRY}}
username: ${{ secrets.CONTAINERS_QUAY_USERNAME }}
password: ${{ secrets.CONTAINERS_QUAY_PASSWORD }}

- name: Push images to ${{ steps.containers_reg.outputs.fqin }}
if: steps.containers_reg.outputs.push == 'true'
uses: docker/build-push-action@v2
with:
cache-from: type=registry,ref=localhost:5000/${{ env.REPONAME }}/${{ matrix.source }}
cache-to: type=inline
context: contrib/${{ env.REPONAME }}image/${{ matrix.source }}
file: ./contrib/${{ env.REPONAME }}image/${{ matrix.source }}/Dockerfile
platforms: ${{ env.PLATFORMS }}
push: true
tags: ${{ steps.containers_reg.outputs.fqin }}
labels: |
${{ env.LABELS }}
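The net effect of this workflow is a daily multi-arch image refresh on Quay; assuming the registries and tags configured above, consumers would pull the results with something like:

    podman pull quay.io/skopeo/stable:latest
    podman pull quay.io/containers/skopeo:latest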
.travis.yml (75)

@@ -1,75 +0,0 @@
language: go

go:
- 1.15.x

notifications:
email: false

env:
global:
# Multiarch manifest will support architectures from this list. It should be the same architectures, as ones in image-build-push step in this Travis config
- MULTIARCH_MANIFEST_ARCHITECTURES="amd64 s390x ppc64le"
# env variables for stable image build
- STABLE_IMAGE=quay.io/skopeo/stable:v1.2.0
- EXTRA_STABLE_IMAGE=quay.io/containers/skopeo:v1.2.0
# env variable for upstream image build
- UPSTREAM_IMAGE=quay.io/skopeo/upstream:master

# Just declaration of the image-build-push step with script actions to execute
x_base_steps:
- &image-build-push
services:
- docker
os: linux
dist: focal
script:
# skopeo upstream image build
- make -f release/Makefile build-image/upstream
# Push image in case if build is started via cron job or code is pushed to master
- if [ "$TRAVIS_EVENT_TYPE" == "push" ] || [ "$TRAVIS_EVENT_TYPE" == "cron" ]; then
make -f release/Makefile push-image/upstream ;
fi

# skopeo stable image build
- make -f release/Makefile build-image/stable
# Push image in case if build is started via cron job or code is pushed to master
- if [ "$TRAVIS_EVENT_TYPE" == "push" ] || [ "$TRAVIS_EVENT_TYPE" == "cron" ]; then
make -f release/Makefile push-image/stable ;
fi

# Just declaration of stage order to run
stages:
# Build and push image for 1 architecture
- name: image-build-push
if: branch = master
# Create and push image manifest to have multiarch image on top of architecture specific images
- name: manifest-multiarch-push
if: (type IN (push, cron)) AND branch = master

# Actual execution of steps
jobs:
include:
# Run 3 image-build-push tasks in parallel for linux/amd64, linux/s390x and linux/ppc64le platforms (for upstream and stable)
- stage: image-build-push
<<: *image-build-push
name: images for amd64
arch: amd64

- stage: image-build-push
<<: *image-build-push
name: images for s390x
arch: s390x

- stage: image-build-push
<<: *image-build-push
name: images for ppc64le
arch: ppc64le

# Run final task to generate multi-arch image manifests (for upstream and stable)
- stage: manifest-multiarch-push
os: linux
dist: focal
script:
- make -f release/Makefile push-manifest-multiarch/upstream
- make -f release/Makefile push-manifest-multiarch/stable
CODE-OF-CONDUCT.md

@@ -1,3 +1,3 @@
## The skopeo Project Community Code of Conduct

The skopeo project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md).
The skopeo project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
CONTRIBUTING.md

@@ -117,29 +117,31 @@ commit automatically with `git commit -s`.

### Dependencies management

Make sure [`vndr`](https://github.com/LK4D4/vndr) is installed.
Dependencies are managed via [standard go modules](https://golang.org/ref/mod).

In order to add a new dependency to this project:

- add a new line to `vendor.conf` according to `vndr` rules (e.g. `github.com/pkg/errors master`)
- use `go get -d path/to/dep@version` to add a new line to `go.mod`
- run `make vendor`

In order to update an existing dependency:

- update the relevant dependency line in `vendor.conf`
- use `go get -d -u path/to/dep@version` to update the relevant dependency line in `go.mod`
- run `make vendor`

When new PRs for [containers/image](https://github.com/containers/image) break `skopeo` (i.e. `containers/image` tests fail in `make test-skopeo`):

- create out a new branch in your `skopeo` checkout and switch to it
- update `vendor.conf`. Find out the `containers/image` dependency; update it to vendor from your own branch and your own repository fork (e.g. `github.com/containers/image my-branch https://github.com/runcom/image`)
- find out the version of `containers/image` you want to use and note its commit ID. You might also want to use a fork of `containers/image`, in that case note its repo
- use `go get -d github.com/$REPO/image/v5@$COMMIT_ID` to download the right version. The command will fetch the dependency and then fail because of a conflict in `go.mod`, this is expected. Note the pseudo-version (eg. `v5.13.1-0.20210707123201-50afbf0a326`)
- use `go mod edit -replace=github.com/containers/image/v5=github.com/$REPO/image/v5@$PSEUDO_VERSION` to add a replacement line to `go.mod` (e.g. `replace github.com/containers/image/v5 => github.com/moio/image/v5 v5.13.1-0.20210707123201-50afbf0a3262`)
- run `make vendor`
- make any other necessary changes in the skopeo repo (e.g. add other dependencies now required by `containers/image`, or update skopeo for changed `containers/image` API)
- optionally add new integration tests to the skopeo repo
- submit the resulting branch as a skopeo PR, marked “DO NOT MERGE”
- iterate until tests pass and the PR is reviewed
- then the original `containers/image` PR can be merged, disregarding its `make test-skopeo` failure
- as soon as possible after that, in the skopeo PR, restore the `containers/image` line in `vendor.conf` to use `containers/image:master`
- as soon as possible after that, in the skopeo PR, use `go mod edit -dropreplace=github.com/containers/image` to remove the `replace` line in `go.mod`
- run `make vendor`
- update the skopeo PR with the result, drop the “DO NOT MERGE” marking
- after tests complete successfully again, merge the skopeo PR
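Taken together, the new fork-vendoring steps in the CONTRIBUTING.md text above boil down to roughly the following sequence (a sketch only; $REPO, $COMMIT_ID, and $PSEUDO_VERSION are placeholders, not values from this compare):

    # point go.mod at a containers/image fork, then re-vendor
    go get -d github.com/$REPO/image/v5@$COMMIT_ID     # expected to fail with a go.mod conflict; note the pseudo-version it prints
    go mod edit -replace=github.com/containers/image/v5=github.com/$REPO/image/v5@$PSEUDO_VERSION
    make vendor
    # ...iterate on the skopeo PR until tests pass, then undo the replacement...
    go mod edit -dropreplace=github.com/containers/image/v5
    make vendor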
Dockerfile

@@ -1,4 +1,4 @@
FROM fedora
FROM registry.fedoraproject.org/fedora:latest

RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-md2man \
# storage deps
@@ -21,6 +21,7 @@ RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-md2ma
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
RUN set -x \
&& export GO111MODULE=off \
&& REGISTRY_COMMIT_SCHEMA1=ec87e9b6971d831f0eff752ddb54fb64693e51cd \
&& REGISTRY_COMMIT=47a064d4195a9b56133891bbb13620c3ac83a827 \
&& export GOPATH="$(mktemp -d)" \
@@ -34,6 +35,7 @@ RUN set -x \
&& rm -rf "$GOPATH"

RUN set -x \
&& export GO111MODULE=off \
&& export GOPATH=$(mktemp -d) \
&& git clone --depth 1 -b v1.5.0-alpha.3 git://github.com/openshift/origin "$GOPATH/src/github.com/openshift/origin" \
# The sed edits out a "go < 1.5" check which works incorrectly with go ≥ 1.10. \
Makefile (47)

@@ -10,12 +10,12 @@ export GOPROXY=https://proxy.golang.org
# (and the user will probably find out because the cgo compilation will fail).
GPGME_ENV := CGO_CFLAGS="$(shell gpgme-config --cflags 2>/dev/null)" CGO_LDFLAGS="$(shell gpgme-config --libs 2>/dev/null)"

# Normally empty, DESTDIR can be used to relocate the entire install-tree
# The following variables very roughly follow https://www.gnu.org/prep/standards/standards.html#Makefile-Conventions .
DESTDIR ?=
CONTAINERSCONFDIR ?= ${DESTDIR}/etc/containers
PREFIX ?= /usr/local
CONTAINERSCONFDIR ?= /etc/containers
REGISTRIESDDIR ?= ${CONTAINERSCONFDIR}/registries.d
SIGSTOREDIR ?= ${DESTDIR}/var/lib/containers/sigstore
PREFIX ?= ${DESTDIR}/usr/local
SIGSTOREDIR ?= /var/lib/containers/sigstore
BINDIR ?= ${PREFIX}/bin
MANDIR ?= ${PREFIX}/share/man
BASHCOMPLETIONSDIR ?= ${PREFIX}/share/bash-completion/completions
@@ -116,10 +116,10 @@ binary: cmd/skopeo
.PHONY: nixpkgs
nixpkgs:
@nix run \
-f channel:nixos-20.09 nix-prefetch-git \
-f channel:nixos-21.05 nix-prefetch-git \
-c nix-prefetch-git \
--no-deepClone \
https://github.com/nixos/nixpkgs refs/heads/nixos-20.09 > nix/nixpkgs.json
https://github.com/nixos/nixpkgs refs/heads/nixos-21.05 > nix/nixpkgs.json

# Build statically linked binary
.PHONY: static
@@ -153,23 +153,23 @@ clean:
rm -rf bin docs/*.1

install: install-binary install-docs install-completions
install -d -m 755 ${SIGSTOREDIR}
install -d -m 755 ${CONTAINERSCONFDIR}
install -m 644 default-policy.json ${CONTAINERSCONFDIR}/policy.json
install -d -m 755 ${REGISTRIESDDIR}
install -m 644 default.yaml ${REGISTRIESDDIR}/default.yaml
install -d -m 755 ${DESTDIR}${SIGSTOREDIR}
install -d -m 755 ${DESTDIR}${CONTAINERSCONFDIR}
install -m 644 default-policy.json ${DESTDIR}${CONTAINERSCONFDIR}/policy.json
install -d -m 755 ${DESTDIR}${REGISTRIESDDIR}
install -m 644 default.yaml ${DESTDIR}${REGISTRIESDDIR}/default.yaml

install-binary: bin/skopeo
install -d -m 755 ${BINDIR}
install -m 755 bin/skopeo ${BINDIR}/skopeo
install -d -m 755 ${DESTDIR}${BINDIR}
install -m 755 bin/skopeo ${DESTDIR}${BINDIR}/skopeo

install-docs: docs
install -d -m 755 ${MANDIR}/man1
install -m 644 docs/*.1 ${MANDIR}/man1
install -d -m 755 ${DESTDIR}${MANDIR}/man1
install -m 644 docs/*.1 ${DESTDIR}${MANDIR}/man1

install-completions:
install -m 755 -d ${BASHCOMPLETIONSDIR}
install -m 644 completions/bash/skopeo ${BASHCOMPLETIONSDIR}/skopeo
install -m 755 -d ${DESTDIR}${BASHCOMPLETIONSDIR}
install -m 644 completions/bash/skopeo ${DESTDIR}${BASHCOMPLETIONSDIR}/skopeo

shell: build-container
$(CONTAINER_RUN) bash
@@ -204,14 +204,21 @@ test-unit: build-container
$(CONTAINER_RUN) make test-unit-local BUILDTAGS='$(BUILDTAGS)'

validate: build-container
$(CONTAINER_RUN) hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet
$(CONTAINER_RUN) make validate-local

# This target is only intended for development, e.g. executing it from an IDE. Use (make test) for CI or pre-release testing.
test-all-local: validate-local test-unit-local
test-all-local: validate-local validate-docs test-unit-local

.PHONY: validate-local
validate-local:
hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet

# This invokes bin/skopeo, hence cannot be run as part of validate-local
.PHONY: validate-docs
validate-docs:
hack/man-page-checker
hack/xref-helpmsgs-manpages

test-unit-local:
$(GPGME_ENV) $(GO) test $(MOD_VENDOR) -tags "$(BUILDTAGS)" $$($(GO) list $(MOD_VENDOR) -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/containers/skopeo/\(integration\|vendor/.*\)$$')

@@ -221,4 +228,4 @@ vendor:
$(GO) mod verify

vendor-in-container:
podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.13 make vendor
podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.16 make vendor
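With the Makefile change above, install paths are composed as ${DESTDIR}${PREFIX}/... only at install time instead of being baked into PREFIX and friends, so a staged install for packaging looks roughly like this (assuming the default target builds the binary and docs; the staging path is an example):

    make
    make DESTDIR=/tmp/skopeo-staging PREFIX=/usr install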
SECURITY.md

@@ -1,3 +1,3 @@
## Security and Disclosure Information Policy for the skopeo Project

The skopeo Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects.
The skopeo Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects.
cmd/skopeo/copy.go

@@ -19,31 +19,34 @@ import (
)

type copyOptions struct {
global *globalOptions
srcImage *imageOptions
destImage *imageDestOptions
retryOpts *retry.RetryOptions
additionalTags []string // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
digestFile string // Write digest to this file
format optionalString // Force conversion of the image to a specified format
quiet bool // Suppress output information when copying images
all bool // Copy all of the images if the source is a list
encryptLayer []int // The list of layers to encrypt
encryptionKeys []string // Keys needed to encrypt the image
decryptionKeys []string // Keys needed to decrypt the image
global *globalOptions
deprecatedTLSVerify *deprecatedTLSVerifyOption
srcImage *imageOptions
destImage *imageDestOptions
retryOpts *retry.RetryOptions
additionalTags []string // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
digestFile string // Write digest to this file
format optionalString // Force conversion of the image to a specified format
quiet bool // Suppress output information when copying images
all bool // Copy all of the images if the source is a list
encryptLayer []int // The list of layers to encrypt
encryptionKeys []string // Keys needed to encrypt the image
decryptionKeys []string // Keys needed to decrypt the image
}

func copyCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
srcFlags, srcOpts := imageFlags(global, sharedOpts, "src-", "screds")
destFlags, destOpts := imageDestFlags(global, sharedOpts, "dest-", "dcreds")
deprecatedTLSVerifyFlags, deprecatedTLSVerifyOpt := deprecatedTLSVerifyFlags()
srcFlags, srcOpts := imageFlags(global, sharedOpts, deprecatedTLSVerifyOpt, "src-", "screds")
destFlags, destOpts := imageDestFlags(global, sharedOpts, deprecatedTLSVerifyOpt, "dest-", "dcreds")
retryFlags, retryOpts := retryFlags()
opts := copyOptions{global: global,
srcImage: srcOpts,
destImage: destOpts,
retryOpts: retryOpts,
deprecatedTLSVerify: deprecatedTLSVerifyOpt,
srcImage: srcOpts,
destImage: destOpts,
retryOpts: retryOpts,
}
cmd := &cobra.Command{
Use: "copy [command options] SOURCE-IMAGE DESTINATION-IMAGE",
@@ -61,6 +64,7 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
adjustUsage(cmd)
flags := cmd.Flags()
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&deprecatedTLSVerifyFlags)
flags.AddFlagSet(&srcFlags)
flags.AddFlagSet(&destFlags)
flags.AddFlagSet(&retryFlags)
@@ -70,7 +74,7 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE-IMAGE")
flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
flags.StringVar(&opts.digestFile, "digestfile", "", "Write the digest of the pushed image to the specified file")
flags.VarP(newOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)`)
flags.VarP(newOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use in the destination (default is manifest type of source, with fallbacks)`)
flags.StringSliceVar(&opts.encryptionKeys, "encryption-key", []string{}, "*Experimental* key with the encryption protocol to use needed to encrypt the image (e.g. jwe:/path/to/key.pem)")
flags.IntSliceVar(&opts.encryptLayer, "encrypt-layer", []int{}, "*Experimental* the 0-indexed layer indices, with support for negative indexing (e.g. 0 is the first layer, -1 is the last layer)")
flags.StringSliceVar(&opts.decryptionKeys, "decryption-key", []string{}, "*Experimental* key needed to decrypt the image")
@@ -81,6 +85,7 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
if len(args) != 2 {
return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
}
opts.deprecatedTLSVerify.warnIfUsed([]string{"--src-tls-verify", "--dest-tls-verify"})
imageNames := args

if err := reexecIfNecessaryForImages(imageNames...); err != nil {
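For the deprecated global --tls-verify handled above, the per-image flags it warns about are the supported spelling; a hypothetical invocation using flags and transports named in this compare would look like:

    skopeo copy --src-tls-verify=false docker://registry.example.com/busybox dir:/tmp/busybox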
cmd/skopeo/delete.go

@@ -20,7 +20,7 @@ type deleteOptions struct {

func deleteCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
imageFlags, imageOpts := imageFlags(global, sharedOpts, nil, "", "")
retryFlags, retryOpts := retryFlags()
opts := deleteOptions{
global: global,
cmd/skopeo/inspect.go

@@ -34,7 +34,7 @@ type inspectOptions struct {

func inspectCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
imageFlags, imageOpts := imageFlags(global, sharedOpts, nil, "", "")
retryFlags, retryOpts := retryFlags()
opts := inspectOptions{
global: global,
@@ -222,13 +222,6 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
return printTmpl(row, data)
}

func inspectNormalize(row string) string {
r := strings.NewReplacer(
".ImageID", ".Image",
)
return r.Replace(row)
}

func printTmpl(row string, data []interface{}) error {
t, err := template.New("skopeo inspect").Parse(row)
if err != nil {
cmd/skopeo/layers.go

@@ -25,7 +25,7 @@ type layersOptions struct {

func layersCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
imageFlags, imageOpts := imageFlags(global, sharedOpts, nil, "", "")
retryFlags, retryOpts := retryFlags()
opts := layersOptions{
global: global,
cmd/skopeo/list_tags.go

@@ -30,7 +30,7 @@ type tagsOptions struct {

func tagsCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := dockerImageFlags(global, sharedOpts, "", "")
imageFlags, imageOpts := dockerImageFlags(global, sharedOpts, nil, "", "")
retryFlags, retryOpts := retryFlags()

opts := tagsOptions{
cmd/skopeo/list_tags_test.go

@@ -5,6 +5,7 @@ import (

"github.com/containers/image/v5/transports/alltransports"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

// Tests the kinds of inputs allowed and expected to the command
@@ -17,10 +18,10 @@ func TestDockerRepositoryReferenceParser(t *testing.T) {
} {

ref, err := parseDockerRepositoryReference(test[0])
require.NoError(t, err)
expected, err := alltransports.ParseImageName(test[0])
if assert.NoError(t, err, "Could not parse, got error on %v", test[0]) {
assert.Equal(t, expected.DockerReference().Name(), ref.DockerReference().Name(), "Mismatched parse result for input %v", test[0])
}
require.NoError(t, err)
assert.Equal(t, expected.DockerReference().Name(), ref.DockerReference().Name(), "Mismatched parse result for input %v", test[0])
}

for _, test := range [][]string{
cmd/skopeo/login.go

@@ -12,7 +12,6 @@ import (
type loginOptions struct {
global *globalOptions
loginOpts auth.LoginOptions
getLogin optionalBool
tlsVerify optionalBool
}

@@ -21,7 +20,7 @@ func loginCmd(global *globalOptions) *cobra.Command {
global: global,
}
cmd := &cobra.Command{
Use: "login",
Use: "login [command options] REGISTRY",
Short: "Login to a container registry",
Long: "Login to a container registry on a specified server.",
RunE: commandAction(opts.run),
@@ -39,6 +38,7 @@ func (opts *loginOptions) run(args []string, stdout io.Writer) error {
defer cancel()
opts.loginOpts.Stdout = stdout
opts.loginOpts.Stdin = os.Stdin
opts.loginOpts.AcceptRepositories = true
sys := opts.global.newSystemContext()
if opts.tlsVerify.present {
sys.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.value)
cmd/skopeo/logout.go

@@ -4,12 +4,14 @@ import (
"io"

"github.com/containers/common/pkg/auth"
"github.com/containers/image/v5/types"
"github.com/spf13/cobra"
)

type logoutOptions struct {
global *globalOptions
logoutOpts auth.LogoutOptions
tlsVerify optionalBool
}

func logoutCmd(global *globalOptions) *cobra.Command {
@@ -17,19 +19,25 @@ func logoutCmd(global *globalOptions) *cobra.Command {
global: global,
}
cmd := &cobra.Command{
Use: "logout",
Use: "logout [command options] REGISTRY",
Short: "Logout of a container registry",
Long: "Logout of a container registry on a specified server.",
RunE: commandAction(opts.run),
Example: `skopeo logout quay.io`,
}
adjustUsage(cmd)
cmd.Flags().AddFlagSet(auth.GetLogoutFlags(&opts.logoutOpts))
flags := cmd.Flags()
optionalBoolFlag(flags, &opts.tlsVerify, "tls-verify", "require HTTPS and verify certificates when accessing the registry")
flags.AddFlagSet(auth.GetLogoutFlags(&opts.logoutOpts))
return cmd
}

func (opts *logoutOptions) run(args []string, stdout io.Writer) error {
opts.logoutOpts.Stdout = stdout
opts.logoutOpts.AcceptRepositories = true
sys := opts.global.newSystemContext()
if opts.tlsVerify.present {
sys.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.value)
}
return auth.Logout(sys, &opts.logoutOpts, args)
}
cmd/skopeo/main.go

@@ -45,6 +45,17 @@ func createApp() (*cobra.Command, *globalOptions) {
},
SilenceUsage: true,
SilenceErrors: true,
// Currently, skopeo uses manually written completions. Cobra allows
// for auto-generating completions for various shells. Podman is
// already making us of that. If Skopeo decides to follow, please
// remove the line below (and hide the `completion` command).
CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
// This is documented to parse "local" (non-PersistentFlags) flags of parent commands before
// running subcommands and handling their options. We don't really run into such cases,
// because all of our flags on rootCommand are in PersistentFlags, except for the deprecated --tls-verify;
// in that case we need TraverseChildren so that we can distinguish between
// (skopeo --tls-verify inspect) (causes a warning) and (skopeo inspect --tls-verify) (no warning).
TraverseChildren: true,
}
if gitCommit != "" {
rootCommand.Version = fmt.Sprintf("%s commit: %s", version.Version, gitCommit)
@@ -55,8 +66,6 @@ func createApp() (*cobra.Command, *globalOptions) {
var dummyVersion bool
rootCommand.Flags().BoolVarP(&dummyVersion, "version", "v", false, "Version for Skopeo")
rootCommand.PersistentFlags().BoolVar(&opts.debug, "debug", false, "enable debug output")
flag := optionalBoolFlag(rootCommand.PersistentFlags(), &opts.tlsVerify, "tls-verify", "Require HTTPS and verify certificates when accessing the registry")
flag.Hidden = true
rootCommand.PersistentFlags().StringVar(&opts.policyPath, "policy", "", "Path to a trust policy file")
rootCommand.PersistentFlags().BoolVar(&opts.insecurePolicy, "insecure-policy", false, "run the tool without any policy check")
rootCommand.PersistentFlags().StringVar(&opts.registriesDirPath, "registries.d", "", "use registry configuration files in `DIR` (e.g. for container signature storage)")
@@ -69,6 +78,8 @@ func createApp() (*cobra.Command, *globalOptions) {
logrus.Fatal("unable to mark registries-conf flag as hidden")
}
rootCommand.PersistentFlags().StringVar(&opts.tmpDir, "tmpdir", "", "directory used to store temporary files")
flag := optionalBoolFlag(rootCommand.Flags(), &opts.tlsVerify, "tls-verify", "Require HTTPS and verify certificates when accessing the registry")
flag.Hidden = true
rootCommand.AddCommand(
copyCmd(&opts),
deleteCmd(&opts),
@@ -16,7 +16,7 @@ type manifestDigestOptions struct {
func manifestDigestCmd() *cobra.Command {
var opts manifestDigestOptions
cmd := &cobra.Command{
Use: "manifest-digest MANIFEST",
Use: "manifest-digest MANIFEST-FILE",
Short: "Compute a manifest digest of a file",
RunE: commandAction(opts.run),
Example: "skopeo manifest-digest manifest.json",
@@ -18,7 +18,7 @@ type standaloneSignOptions struct {
func standaloneSignCmd() *cobra.Command {
opts := standaloneSignOptions{}
cmd := &cobra.Command{
Use: "standalone-sign [command options] MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT",
Use: "standalone-sign [command options] MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT --output|-o SIGNATURE",
Short: "Create a signature using local files",
RunE: commandAction(opts.run),
}
cmd/skopeo/sync.go

@@ -27,17 +27,18 @@ import (

// syncOptions contains information retrieved from the skopeo sync command line.
type syncOptions struct {
global *globalOptions // Global (not command dependent) skopeo options
srcImage *imageOptions // Source image options
destImage *imageDestOptions // Destination image options
retryOpts *retry.RetryOptions
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
format optionalString // Force conversion of the image to a specified format
source string // Source repository name
destination string // Destination registry name
scoped bool // When true, namespace copied images at destination using the source repository name
all bool // Copy all of the images if an image in the source is a list
global *globalOptions // Global (not command dependent) skopeo options
deprecatedTLSVerify *deprecatedTLSVerifyOption
srcImage *imageOptions // Source image options
destImage *imageDestOptions // Destination image options
retryOpts *retry.RetryOptions
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
format optionalString // Force conversion of the image to a specified format
source string // Source repository name
destination string // Destination registry name
scoped bool // When true, namespace copied images at destination using the source repository name
all bool // Copy all of the images if an image in the source is a list
}

// repoDescriptor contains information of a single repository used as a sync source.
@@ -68,27 +69,29 @@ type sourceConfig map[string]registrySyncConfig

func syncCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
srcFlags, srcOpts := dockerImageFlags(global, sharedOpts, "src-", "screds")
destFlags, destOpts := dockerImageFlags(global, sharedOpts, "dest-", "dcreds")
deprecatedTLSVerifyFlags, deprecatedTLSVerifyOpt := deprecatedTLSVerifyFlags()
srcFlags, srcOpts := dockerImageFlags(global, sharedOpts, deprecatedTLSVerifyOpt, "src-", "screds")
destFlags, destOpts := dockerImageFlags(global, sharedOpts, deprecatedTLSVerifyOpt, "dest-", "dcreds")
retryFlags, retryOpts := retryFlags()

opts := syncOptions{
global: global,
srcImage: srcOpts,
destImage: &imageDestOptions{imageOptions: destOpts},
retryOpts: retryOpts,
global: global,
deprecatedTLSVerify: deprecatedTLSVerifyOpt,
srcImage: srcOpts,
destImage: &imageDestOptions{imageOptions: destOpts},
retryOpts: retryOpts,
}

cmd := &cobra.Command{
Use: "sync [command options] --src SOURCE-LOCATION --dest DESTINATION-LOCATION SOURCE DESTINATION",
Use: "sync [command options] --src TRANSPORT --dest TRANSPORT SOURCE DESTINATION",
Short: "Synchronize one or more images from one location to another",
Long: fmt.Sprint(`Copy all the images from a SOURCE to a DESTINATION.
Long: `Copy all the images from a SOURCE to a DESTINATION.

Allowed SOURCE transports (specified with --src): docker, dir, yaml.
Allowed DESTINATION transports (specified with --dest): docker, dir.

See skopeo-sync(1) for details.
`),
`,
RunE: commandAction(opts.run),
Example: `skopeo sync --src docker --dest dir --scoped registry.example.com/busybox /media/usb`,
}
@@ -96,12 +99,13 @@ See skopeo-sync(1) for details.
flags := cmd.Flags()
flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE images")
flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
flags.VarP(newOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use when syncing image(s) to a destination (default is manifest type of source)`)
flags.VarP(newOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use when syncing image(s) to a destination (default is manifest type of source, with fallbacks)`)
flags.StringVarP(&opts.source, "src", "s", "", "SOURCE transport type")
flags.StringVarP(&opts.destination, "dest", "d", "", "DESTINATION transport type")
flags.BoolVar(&opts.scoped, "scoped", false, "Images at DESTINATION are prefix using the full source image path as scope")
flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&deprecatedTLSVerifyFlags)
flags.AddFlagSet(&srcFlags)
flags.AddFlagSet(&destFlags)
flags.AddFlagSet(&retryFlags)
@@ -207,7 +211,6 @@ func getImageTags(ctx context.Context, sysCtx *types.SystemContext, repoRef refe
// Some registries may decide to block the "list all tags" endpoint.
// Gracefully allow the sync to continue in this case.
logrus.Warnf("Registry disallows tag list retrieval: %s", err)
break
default:
return tags, errors.Wrapf(err, "Error determining repository tags for image %s", name)
}
@@ -493,6 +496,7 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
if len(args) != 2 {
return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
}
opts.deprecatedTLSVerify.warnIfUsed([]string{"--src-tls-verify", "--dest-tls-verify"})

policyContext, err := opts.global.getPolicyContext()
if err != nil {
@@ -2,10 +2,6 @@

package main

func maybeReexec() error {
return nil
}

func reexecIfNecessaryForImages(inputImageNames ...string) error {
return nil
}
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/containers/image/v5/types"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
@@ -38,6 +39,35 @@ func commandAction(handler func(args []string, stdout io.Writer) error) func(cmd
|
||||
}
|
||||
}
|
||||
|
||||
// deprecatedTLSVerifyOption represents a deprecated --tls-verify option,
|
||||
// which was accepted for all subcommands, for a time.
|
||||
// Every user should call deprecatedTLSVerifyOption.warnIfUsed() as part of handling the CLI,
|
||||
// whether or not the value actually ends up being used.
|
||||
// DO NOT ADD ANY NEW USES OF THIS; just call dockerImageFlags with an appropriate, possibly empty, flagPrefix.
|
||||
type deprecatedTLSVerifyOption struct {
|
||||
tlsVerify optionalBool // FIXME FIXME: Warn if this is used, or even if it is ignored.
|
||||
}
|
||||
|
||||
// warnIfUsed warns if tlsVerify was set by the user, and suggests alternatives (which should
|
||||
// start with "--").
|
||||
// Every user should call this as part of handling the CLI, whether or not the value actually
|
||||
// ends up being used.
|
||||
func (opts *deprecatedTLSVerifyOption) warnIfUsed(alternatives []string) {
|
||||
if opts.tlsVerify.present {
|
||||
logrus.Warnf("'--tls-verify' is deprecated, instead use: %s", strings.Join(alternatives, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
// deprecatedTLSVerifyFlags prepares the CLI flag writing into deprecatedTLSVerifyOption, and the managed deprecatedTLSVerifyOption structure.
|
||||
// DO NOT ADD ANY NEW USES OF THIS; just call dockerImageFlags with an appropriate, possibly empty, flagPrefix.
|
||||
func deprecatedTLSVerifyFlags() (pflag.FlagSet, *deprecatedTLSVerifyOption) {
|
||||
opts := deprecatedTLSVerifyOption{}
|
||||
fs := pflag.FlagSet{}
|
||||
flag := optionalBoolFlag(&fs, &opts.tlsVerify, "tls-verify", "require HTTPS and verify certificates when accessing the container registry (defaults to true)")
|
||||
flag.Hidden = true
|
||||
return fs, &opts
|
||||
}
|
||||
|
||||
// sharedImageOptions collects CLI flags which are image-related, but do not change across images.
|
||||
// This really should be a part of globalOptions, but that would break existing users of (skopeo copy --authfile=).
|
||||
type sharedImageOptions struct {
|
||||
@@ -56,14 +86,15 @@ func sharedImageFlags() (pflag.FlagSet, *sharedImageOptions) {
|
||||
// the same across subcommands, but may be different for each image
|
||||
// (e.g. may differ between the source and destination of a copy)
|
||||
type dockerImageOptions struct {
|
||||
global *globalOptions // May be shared across several imageOptions instances.
|
||||
shared *sharedImageOptions // May be shared across several imageOptions instances.
|
||||
authFilePath optionalString // Path to a */containers/auth.json (prefixed version to override shared image option).
|
||||
credsOption optionalString // username[:password] for accessing a registry
|
||||
registryToken optionalString // token to be used directly as a Bearer token when accessing the registry
|
||||
dockerCertPath string // A directory using Docker-like *.{crt,cert,key} files for connecting to a registry or a daemon
|
||||
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
|
||||
noCreds bool // Access the registry anonymously
|
||||
global *globalOptions // May be shared across several imageOptions instances.
|
||||
shared *sharedImageOptions // May be shared across several imageOptions instances.
|
||||
deprecatedTLSVerify *deprecatedTLSVerifyOption // May be shared across several imageOptions instances, or nil.
|
||||
authFilePath optionalString // Path to a */containers/auth.json (prefixed version to override shared image option).
|
||||
credsOption optionalString // username[:password] for accessing a registry
|
||||
registryToken optionalString // token to be used directly as a Bearer token when accessing the registry
|
||||
dockerCertPath string // A directory using Docker-like *.{crt,cert,key} files for connecting to a registry or a daemon
|
||||
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
|
||||
noCreds bool // Access the registry anonymously
|
||||
}
|
||||
|
||||
// imageOptions collects CLI flags which are the same across subcommands, but may be different for each image
|
||||
@@ -76,11 +107,12 @@ type imageOptions struct {
|
||||
|
||||
// dockerImageFlags prepares a collection of docker-transport specific CLI flags
|
||||
// writing into imageOptions, and the managed imageOptions structure.
func dockerImageFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageOptions) {
func dockerImageFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLSVerify *deprecatedTLSVerifyOption, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageOptions) {
flags := imageOptions{
dockerImageOptions: dockerImageOptions{
global: global,
shared: shared,
global: global,
shared: shared,
deprecatedTLSVerify: deprecatedTLSVerify,
},
}

@@ -104,8 +136,8 @@ func dockerImageFlags(global *globalOptions, shared *sharedImageOptions, flagPre
}

// imageFlags prepares a collection of CLI flags writing into imageOptions, and the managed imageOptions structure.
func imageFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageOptions) {
dockerFlags, opts := dockerImageFlags(global, shared, flagPrefix, credsOptionAlias)
func imageFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLSVerify *deprecatedTLSVerifyOption, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageOptions) {
dockerFlags, opts := dockerImageFlags(global, shared, deprecatedTLSVerify, flagPrefix, credsOptionAlias)

fs := pflag.FlagSet{}
fs.StringVar(&opts.sharedBlobDir, flagPrefix+"shared-blob-dir", "", "`DIRECTORY` to use to share blobs across OCI repositories")
@@ -114,10 +146,6 @@ func imageFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, c
return fs, opts
}

type retryOptions struct {
maxRetry int // The number of times to possibly retry
}

func retryFlags() (pflag.FlagSet, *retry.RetryOptions) {
opts := retry.RetryOptions{}
fs := pflag.FlagSet{}
@@ -139,6 +167,10 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
if opts.dockerImageOptions.authFilePath.present {
ctx.AuthFilePath = opts.dockerImageOptions.authFilePath.value
}
if opts.deprecatedTLSVerify != nil && opts.deprecatedTLSVerify.tlsVerify.present {
// If both this deprecated option and a non-deprecated option is present, we use the latter value.
ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.deprecatedTLSVerify.tlsVerify.value)
}
if opts.tlsVerify.present {
ctx.DockerDaemonInsecureSkipTLSVerify = !opts.tlsVerify.value
}
@@ -169,18 +201,20 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
type imageDestOptions struct {
*imageOptions
dirForceCompression bool // Compress layers when saving to the dir: transport
dirForceDecompression bool // Decompress layers when saving to the dir: transport
ociAcceptUncompressedLayers bool // Whether to accept uncompressed layers in the oci: transport
compressionFormat string // Format to use for the compression
compressionLevel optionalInt // Level to use for the compression
}

// imageDestFlags prepares a collection of CLI flags writing into imageDestOptions, and the managed imageDestOptions structure.
func imageDestFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageDestOptions) {
genericFlags, genericOptions := imageFlags(global, shared, flagPrefix, credsOptionAlias)
func imageDestFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLSVerify *deprecatedTLSVerifyOption, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageDestOptions) {
genericFlags, genericOptions := imageFlags(global, shared, deprecatedTLSVerify, flagPrefix, credsOptionAlias)
opts := imageDestOptions{imageOptions: genericOptions}
fs := pflag.FlagSet{}
fs.AddFlagSet(&genericFlags)
fs.BoolVar(&opts.dirForceCompression, flagPrefix+"compress", false, "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)")
fs.BoolVar(&opts.dirForceDecompression, flagPrefix+"decompress", false, "Decompress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)")
fs.BoolVar(&opts.ociAcceptUncompressedLayers, flagPrefix+"oci-accept-uncompressed-layers", false, "Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed)")
fs.StringVar(&opts.compressionFormat, flagPrefix+"compress-format", "", "`FORMAT` to use for the compression")
fs.Var(newOptionalIntValue(&opts.compressionLevel), flagPrefix+"compress-level", "`LEVEL` to use for the compression")
@@ -196,6 +230,7 @@ func (opts *imageDestOptions) newSystemContext() (*types.SystemContext, error) {
}

ctx.DirForceCompress = opts.dirForceCompression
ctx.DirForceDecompress = opts.dirForceDecompression
ctx.OCIAcceptUncompressedLayers = opts.ociAcceptUncompressedLayers
if opts.compressionFormat != "" {
cf, err := compression.AlgorithmByName(opts.compressionFormat)

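The hunks above give the transport-specific flags precedence over the new, deprecated per-command **--tls-verify** option. A rough sketch of the resulting CLI behaviour (the registry address and image names here are hypothetical, not taken from the diff):

```sh
# The deprecated per-command --tls-verify still works on its own ...
skopeo copy --tls-verify=false docker://localhost:5000/test:latest dir:/tmp/test
# ... but an explicit --dest-tls-verify wins over it for the destination,
# matching the TestTLSVerifyFlags table below.
skopeo copy --tls-verify=false --dest-tls-verify=true dir:/tmp/test docker://localhost:5000/test:latest
```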
@@ -8,6 +8,7 @@ import (
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -17,17 +18,26 @@ func fakeGlobalOptions(t *testing.T, flags []string) (*globalOptions, *cobra.Com
app, opts := createApp()
cmd := &cobra.Command{}
app.AddCommand(cmd)
err := cmd.ParseFlags(flags)
err := app.ParseFlags(flags)
require.NoError(t, err)
return opts, cmd
}

// fakeImageOptions creates imageOptions and sets it according to globalFlags/cmdFlags.
func fakeImageOptions(t *testing.T, flagPrefix string, globalFlags []string, cmdFlags []string) *imageOptions {
func fakeImageOptions(t *testing.T, flagPrefix string, useDeprecatedTLSVerify bool,
globalFlags []string, cmdFlags []string) *imageOptions {
globalOpts, cmd := fakeGlobalOptions(t, globalFlags)
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(globalOpts, sharedOpts, flagPrefix, "")
var deprecatedTLSVerifyFlag pflag.FlagSet
var deprecatedTLSVerifyOpt *deprecatedTLSVerifyOption
if useDeprecatedTLSVerify {
deprecatedTLSVerifyFlag, deprecatedTLSVerifyOpt = deprecatedTLSVerifyFlags()
}
imageFlags, imageOpts := imageFlags(globalOpts, sharedOpts, deprecatedTLSVerifyOpt, flagPrefix, "")
cmd.Flags().AddFlagSet(&sharedFlags)
if useDeprecatedTLSVerify {
cmd.Flags().AddFlagSet(&deprecatedTLSVerifyFlag)
}
cmd.Flags().AddFlagSet(&imageFlags)
err := cmd.ParseFlags(cmdFlags)
require.NoError(t, err)
@@ -36,7 +46,7 @@ func fakeImageOptions(t *testing.T, flagPrefix string, globalFlags []string, cmd

func TestImageOptionsNewSystemContext(t *testing.T) {
// Default state
opts := fakeImageOptions(t, "dest-", []string{}, []string{})
opts := fakeImageOptions(t, "dest-", true, []string{}, []string{})
res, err := opts.newSystemContext()
require.NoError(t, err)
assert.Equal(t, &types.SystemContext{
@@ -44,7 +54,7 @@ func TestImageOptionsNewSystemContext(t *testing.T) {
}, res)

// Set everything to non-default values.
opts = fakeImageOptions(t, "dest-", []string{
opts = fakeImageOptions(t, "dest-", true, []string{
"--registries.d", "/srv/registries.d",
"--override-arch", "overridden-arch",
"--override-os", "overridden-os",
@@ -80,49 +90,29 @@ func TestImageOptionsNewSystemContext(t *testing.T) {
BigFilesTemporaryDir: "/srv",
}, res)

// Global/per-command tlsVerify behavior
for _, c := range []struct {
global, cmd string
expectedDocker types.OptionalBool
expectedDockerDaemon bool
}{
{"", "", types.OptionalBoolUndefined, false},
{"", "false", types.OptionalBoolTrue, true},
{"", "true", types.OptionalBoolFalse, false},
{"false", "", types.OptionalBoolTrue, false},
{"false", "false", types.OptionalBoolTrue, true},
{"false", "true", types.OptionalBoolFalse, false},
{"true", "", types.OptionalBoolFalse, false},
{"true", "false", types.OptionalBoolTrue, true},
{"true", "true", types.OptionalBoolFalse, false},
} {
globalFlags := []string{}
if c.global != "" {
globalFlags = append(globalFlags, "--tls-verify="+c.global)
}
cmdFlags := []string{}
if c.cmd != "" {
cmdFlags = append(cmdFlags, "--dest-tls-verify="+c.cmd)
}
opts := fakeImageOptions(t, "dest-", globalFlags, cmdFlags)
res, err = opts.newSystemContext()
require.NoError(t, err)
assert.Equal(t, c.expectedDocker, res.DockerInsecureSkipTLSVerify, "%#v", c)
assert.Equal(t, c.expectedDockerDaemon, res.DockerDaemonInsecureSkipTLSVerify, "%#v", c)
}
// Global/per-command tlsVerify behavior is tested in TestTLSVerifyFlags.

// Invalid option values
opts = fakeImageOptions(t, "dest-", []string{}, []string{"--dest-creds", ""})
opts = fakeImageOptions(t, "dest-", true, []string{}, []string{"--dest-creds", ""})
_, err = opts.newSystemContext()
assert.Error(t, err)
}

// fakeImageDestOptions creates imageDestOptions and sets it according to globalFlags/cmdFlags.
func fakeImageDestOptions(t *testing.T, flagPrefix string, globalFlags []string, cmdFlags []string) *imageDestOptions {
func fakeImageDestOptions(t *testing.T, flagPrefix string, useDeprecatedTLSVerify bool,
globalFlags []string, cmdFlags []string) *imageDestOptions {
globalOpts, cmd := fakeGlobalOptions(t, globalFlags)
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageDestFlags(globalOpts, sharedOpts, flagPrefix, "")
var deprecatedTLSVerifyFlag pflag.FlagSet
var deprecatedTLSVerifyOpt *deprecatedTLSVerifyOption
if useDeprecatedTLSVerify {
deprecatedTLSVerifyFlag, deprecatedTLSVerifyOpt = deprecatedTLSVerifyFlags()
}
imageFlags, imageOpts := imageDestFlags(globalOpts, sharedOpts, deprecatedTLSVerifyOpt, flagPrefix, "")
cmd.Flags().AddFlagSet(&sharedFlags)
if useDeprecatedTLSVerify {
cmd.Flags().AddFlagSet(&deprecatedTLSVerifyFlag)
}
cmd.Flags().AddFlagSet(&imageFlags)
err := cmd.ParseFlags(cmdFlags)
require.NoError(t, err)
@@ -131,7 +121,7 @@ func fakeImageDestOptions(t *testing.T, flagPrefix string, globalFlags []string,

func TestImageDestOptionsNewSystemContext(t *testing.T) {
// Default state
opts := fakeImageDestOptions(t, "dest-", []string{}, []string{})
opts := fakeImageDestOptions(t, "dest-", true, []string{}, []string{})
res, err := opts.newSystemContext()
require.NoError(t, err)
assert.Equal(t, &types.SystemContext{
@@ -151,7 +141,7 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
os.Setenv("REGISTRY_AUTH_FILE", authFile)

// Explicitly set everything to default, except for when the default is “not present”
opts = fakeImageDestOptions(t, "dest-", []string{}, []string{
opts = fakeImageDestOptions(t, "dest-", true, []string{}, []string{
"--dest-compress=false",
})
res, err = opts.newSystemContext()
@@ -162,7 +152,7 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
}, res)

// Set everything to non-default values.
opts = fakeImageDestOptions(t, "dest-", []string{
opts = fakeImageDestOptions(t, "dest-", true, []string{
"--registries.d", "/srv/registries.d",
"--override-arch", "overridden-arch",
"--override-os", "overridden-os",
@@ -199,12 +189,104 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
BigFilesTemporaryDir: "/srv",
}, res)

// Global/per-command tlsVerify behavior is tested in TestTLSVerifyFlags.

// Invalid option values in imageOptions
opts = fakeImageDestOptions(t, "dest-", []string{}, []string{"--dest-creds", ""})
opts = fakeImageDestOptions(t, "dest-", true, []string{}, []string{"--dest-creds", ""})
_, err = opts.newSystemContext()
assert.Error(t, err)
}

func TestTLSVerifyFlags(t *testing.T) {
type systemContextOpts interface { // Either *imageOptions or *imageDestOptions
newSystemContext() (*types.SystemContext, error)
}

for _, creator := range []struct {
name string
newOpts func(useDeprecatedTLSVerify bool, globalFlags, cmdFlags []string) systemContextOpts
}{
{
"imageFlags",
func(useDeprecatedTLSVerify bool, globalFlags, cmdFlags []string) systemContextOpts {
return fakeImageOptions(t, "dest-", useDeprecatedTLSVerify, globalFlags, cmdFlags)
},
},
{
"imageDestFlags",
func(useDeprecatedTLSVerify bool, globalFlags, cmdFlags []string) systemContextOpts {
return fakeImageDestOptions(t, "dest-", useDeprecatedTLSVerify, globalFlags, cmdFlags)
},
},
} {
t.Run(creator.name, func(t *testing.T) {
for _, c := range []struct {
global, deprecatedCmd, cmd string
expectedDocker types.OptionalBool
expectedDockerDaemon bool
}{
{"", "", "", types.OptionalBoolUndefined, false},
{"", "", "false", types.OptionalBoolTrue, true},
{"", "", "true", types.OptionalBoolFalse, false},
{"", "false", "", types.OptionalBoolTrue, false},
{"", "false", "false", types.OptionalBoolTrue, true},
{"", "false", "true", types.OptionalBoolFalse, false},
{"", "true", "", types.OptionalBoolFalse, false},
{"", "true", "false", types.OptionalBoolTrue, true},
{"", "true", "true", types.OptionalBoolFalse, false},
{"false", "", "", types.OptionalBoolTrue, false},
{"false", "", "false", types.OptionalBoolTrue, true},
{"false", "", "true", types.OptionalBoolFalse, false},
{"false", "false", "", types.OptionalBoolTrue, false},
{"false", "false", "false", types.OptionalBoolTrue, true},
{"false", "false", "true", types.OptionalBoolFalse, false},
{"false", "true", "", types.OptionalBoolFalse, false},
{"false", "true", "false", types.OptionalBoolTrue, true},
{"false", "true", "true", types.OptionalBoolFalse, false},
{"true", "", "", types.OptionalBoolFalse, false},
{"true", "", "false", types.OptionalBoolTrue, true},
{"true", "", "true", types.OptionalBoolFalse, false},
{"true", "false", "", types.OptionalBoolTrue, false},
{"true", "false", "false", types.OptionalBoolTrue, true},
{"true", "false", "true", types.OptionalBoolFalse, false},
{"true", "true", "", types.OptionalBoolFalse, false},
{"true", "true", "false", types.OptionalBoolTrue, true},
{"true", "true", "true", types.OptionalBoolFalse, false},
} {
globalFlags := []string{}
if c.global != "" {
globalFlags = append(globalFlags, "--tls-verify="+c.global)
}
cmdFlags := []string{}
if c.deprecatedCmd != "" {
cmdFlags = append(cmdFlags, "--tls-verify="+c.deprecatedCmd)
}
if c.cmd != "" {
cmdFlags = append(cmdFlags, "--dest-tls-verify="+c.cmd)
}
opts := creator.newOpts(true, globalFlags, cmdFlags)
res, err := opts.newSystemContext()
require.NoError(t, err)
assert.Equal(t, c.expectedDocker, res.DockerInsecureSkipTLSVerify, "%#v", c)
assert.Equal(t, c.expectedDockerDaemon, res.DockerDaemonInsecureSkipTLSVerify, "%#v", c)

if c.deprecatedCmd == "" { // Test also the behavior when deprecatedTLSFlag is not recognized
// Use globalFlags from the previous test
cmdFlags := []string{}
if c.cmd != "" {
cmdFlags = append(cmdFlags, "--dest-tls-verify="+c.cmd)
}
opts := creator.newOpts(false, globalFlags, cmdFlags)
res, err = opts.newSystemContext()
require.NoError(t, err)
assert.Equal(t, c.expectedDocker, res.DockerInsecureSkipTLSVerify, "%#v", c)
assert.Equal(t, c.expectedDockerDaemon, res.DockerDaemonInsecureSkipTLSVerify, "%#v", c)
}
}
})
}
}

func TestParseManifestFormat(t *testing.T) {
for _, testCase := range []struct {
formatParam string
@@ -271,7 +353,7 @@ func TestImageOptionsAuthfileOverride(t *testing.T) {
}, "/srv/dest-authfile",
},
} {
opts := fakeImageOptions(t, testCase.flagPrefix, []string{}, testCase.cmdFlags)
opts := fakeImageOptions(t, testCase.flagPrefix, false, []string{}, testCase.cmdFlags)
res, err := opts.newSystemContext()
require.NoError(t, err)

@@ -56,6 +56,7 @@ _skopeo_copy() {
local boolean_options="
--all
--dest-compress
--dest-decompress
--remove-signatures
--src-no-creds
--dest-no-creds

@@ -54,15 +54,19 @@ _run_vendor() {
}

_run_build() {
podmanmake bin/skopeo BUILDTAGS="$BUILDTAGS"
make bin/skopeo BUILDTAGS="$BUILDTAGS"
}

_run_validate() {
podmanmake validate-local BUILDTAGS="$BUILDTAGS"
_run_cross() {
podmanmake local-cross BUILDTAGS="$BUILDTAGS"
}

_run_doccheck() {
make validate-docs BUILDTAGS="$BUILDTAGS"
}

_run_unit() {
podmanmake test-integration-local BUILDTAGS="$BUILDTAGS"
podmanmake test-unit-local BUILDTAGS="$BUILDTAGS"
}

_run_integration() {

@@ -6,22 +6,35 @@

## Overview

This directory contains the Dockerfiles necessary to create the three skopeoimage container
images that are housed on quay.io under the skopeo account. All three repositories where
the images live are public and can be pulled without credentials. These container images
are secured and the resulting containers can run safely. The container images are built
using the latest Fedora and then Skopeo is installed into them:
This directory contains the Dockerfiles necessary to create the skopeoimage container
images that are housed on quay.io under the skopeo account. All repositories where
the images live are public and can be pulled without credentials. These container images are secured and the
resulting containers can run safely with privileges within the container.

* quay.io/skopeo/stable - This image is built using the latest stable version of Skopeo in a Fedora based container. Built with skopeoimage/stable/Dockerfile.
* quay.io/skopeo/upstream - This image is built using the latest code found in this GitHub repository. When someone creates a commit and pushes it, the image is created. Due to that the image changes frequently and is not guaranteed to be stable. Built with skopeoimage/upstream/Dockerfile.
* quay.io/skopeo/testing - This image is built using the latest version of Skopeo that is or was in updates testing for Fedora. At times this may be the same as the stable image. This container image will primarily be used by the development teams for verification testing when a new package is created. Built with skopeoimage/testing/Dockerfile.
The container images are built using the latest Fedora and then Skopeo is installed into them.
The PATH in the container images is set to the default PATH provided by Fedora. Also, the
ENTRYPOINT and the WORKDIR variables are not set within these container images, as such they
default to `/`.

## Multiarch images
The container images are:

Multiarch images are available for Skopeo upstream and stable versions. Supported architectures are `amd64`, `s390x`, `ppc64le`.
Available images are `quay.io/skopeo/upstream:master`, `quay.io/skopeo/stable:v1.2.0`, `quay.io/containers/skopeo:v1.2.0`.
* `quay.io/containers/skopeo:<version>` and `quay.io/skopeo/stable:<version>` -
These images are built when a new Skopeo version becomes available in
Fedora. These images are intended to be unchanging and stable, they will
never be updated by automation once they've been pushed. For build details,
please [see the configuration file](stable/Dockerfile).
* `quay.io/containers/skopeo:latest` and `quay.io/skopeo/stable:latest` -
Built daily using the same Dockerfile as above. The skopeo version
will remain the "latest" available in Fedora, however the image
contents may vary compared to the version-tagged images.
* `quay.io/skopeo/testing:latest` - This image is built daily, using the
latest version of Skopeo that was in the Fedora `updates-testing` repository.
The image is Built with [the testing Dockerfile](testing/Dockerfile).
* `quay.io/skopeo/upstream:latest` - This image is built daily using the latest
code found in this GitHub repository. Due to the image changing frequently,
it's not guaranteed to be stable or even executable. The image is built with
[the upstream Dockerfile](upstream/Dockerfile).

Images can be used the same way as in a single architecture case, no extra setup is required. For samples see next chapter.

## Sample Usage

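For instance, the stable image described above can run skopeo without a local install. A minimal sketch, assuming the ENTRYPOINT is left unset as the overview states (tag and target image are only placeholders):

```sh
# Run skopeo from the published stable image and inspect a remote image.
podman run --rm quay.io/skopeo/stable:latest skopeo inspect docker://registry.fedoraproject.org/fedora:latest
```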
@@ -4,7 +4,7 @@
skopeo\-copy - Copy an image (manifest, filesystem layers, signatures) from one location to another.

## SYNOPSIS
**skopeo copy** [**--sign-by=**_key-ID_] _source-image destination-image_
**skopeo copy** [*options*] _source-image_ _destination-image_

## DESCRIPTION
Copy an image (manifest, filesystem layers, signatures) from one location to another.
@@ -20,7 +20,11 @@ automatically inherit any parts of the source name.

## OPTIONS

**--all**
**--additional-tag**=_strings_

Additional tags (supports docker-archive).

**--all**, **-a**

If _source-image_ refers to a list of images, instead of copying just the image which matches the current OS and
architecture (subject to the use of the global --override-os, --override-arch and --override-variant options), attempt to copy all of
@@ -42,55 +46,123 @@ Path of the authentication file for the source registry. Uses path given by `--a

Path of the authentication file for the destination registry. Uses path given by `--authfile`, if not provided.

**--dest-shared-blob-dir** _directory_

Directory to use to share blobs across OCI repositories.

**--digestfile** _path_

After copying the image, write the digest of the resulting image to the file.

**--format, -f** _manifest-type_ Manifest type (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)
**--encrypt-layer** _ints_

**--quiet, -q** suppress output information when copying images
*Experimental* the 0-indexed layer indices, with support for negative indexing (e.g. 0 is the first layer, -1 is the last layer)

**--remove-signatures** do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.
**--format**, **-f** _manifest-type_

**--sign-by=**_key-id_ add a signature using that key ID for an image name corresponding to _destination-image_
MANIFEST TYPE (oci, v2s1, or v2s2) to use in the destination (default is manifest type of source, with fallbacks)

**--encryption-key** _protocol:keyfile_ specifies the encryption protocol, which can be JWE (RFC7516), PGP (RFC4880), and PKCS7 (RFC2315) and the key material required for image encryption. For instance, jwe:/path/to/key.pem or pgp:admin@example.com or pkcs7:/path/to/x509-file.
**--help**, **-h**

**--decryption-key** _key[:passphrase]_ to be used for decryption of images. Key can point to keys and/or certificates. Decryption will be tried with all keys. If the key is protected by a passphrase, it is required to be passed in the argument and omitted otherwise.
Print usage statement

**--src-creds** _username[:password]_ for accessing the source registry.
**--quiet**, **-q**

**--dest-compress** _bool-value_ Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source).
Suppress output information when copying images.

**--dest-oci-accept-uncompressed-layers** _bool-value_ Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed).
**--remove-signatures**

**--dest-creds** _username[:password]_ for accessing the destination registry.
Do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.

**--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon.
**--sign-by**=_key-id_

**--src-no-creds** _bool-value_ Access the registry anonymously.
Add a signature using that key ID for an image name corresponding to _destination-image_

**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container source registry or daemon (defaults to true).
**--src-shared-blob-dir** _directory_

**--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon.
Directory to use to share blobs across OCI repositories.

**--dest-no-creds** _bool-value_ Access the registry anonymously.
**--encryption-key** _protocol:keyfile_

**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container destination registry or daemon (defaults to true).
Specifies the encryption protocol, which can be JWE (RFC7516), PGP (RFC4880), and PKCS7 (RFC2315) and the key material required for image encryption. For instance, jwe:/path/to/key.pem or pgp:admin@example.com or pkcs7:/path/to/x509-file.

**--src-daemon-host** _host_ Copy from docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).
**--decryption-key** _key[:passphrase]_

**--dest-daemon-host** _host_ Copy to docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).
Key to be used for decryption of images. Key can point to keys and/or certificates. Decryption will be tried with all keys. If the key is protected by a passphrase, it is required to be passed in the argument and omitted otherwise.

**--src-creds** _username[:password]_

Credentials for accessing the source registry.

**--dest-compress** _bool-value_

Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source).

**--dest-decompress** _bool-value_

Decompress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source).

**--dest-oci-accept-uncompressed-layers** _bool-value_

Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed).

**--dest-creds** _username[:password]_

Credentials for accessing the destination registry.

**--src-cert-dir** _path_

Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon.

**--src-no-creds** _bool-value_

Access the registry anonymously.

**--src-tls-verify** _bool-value_

Require HTTPS and verify certificates when talking to container source registry or daemon (defaults to true).

**--dest-cert-dir** _path_

Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon.

**--dest-no-creds** _bool-value_

Access the registry anonymously.

**--dest-tls-verify** _bool-value_

Require HTTPS and verify certificates when talking to container destination registry or daemon (defaults to true).

**--src-daemon-host** _host_

Copy from docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).

**--dest-daemon-host** _host_

Copy to docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).

Existing signatures, if any, are preserved as well.

**--dest-compress-format** _format_ Specifies the compression format to use. Supported values are: `gzip` and `zstd`.
**--dest-compress-format** _format_

**--dest-compress-level** _format_ Specifies the compression level to use. The value is specific to the compression algorithm used, e.g. for zstd the accepted values are in the range 1-20 (inclusive), while for gzip it is 1-9 (inclusive).
Specifies the compression format to use. Supported values are: `gzip` and `zstd`.

**--src-registry-token** _Bearer token_ for accessing the source registry.
**--dest-compress-level** _format_

**--dest-registry-token** _Bearer token_ for accessing the destination registry.
Specifies the compression level to use. The value is specific to the compression algorithm used, e.g. for zstd the accepted values are in the range 1-20 (inclusive), while for gzip it is 1-9 (inclusive).

**--src-registry-token** _token_

Bearer token for accessing the source registry.

**--dest-registry-token** _token_

Bearer token for accessing the destination registry.

**--retry-times**

The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.

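The compression options above combine roughly as in this sketch (image names and paths are illustrative only, not taken from the page):

```sh
# Recompress layers with zstd at level 10 while copying into a local dir: target,
# and record the resulting manifest digest in a file.
skopeo copy --dest-compress --dest-compress-format zstd --dest-compress-level 10 \
    --digestfile /tmp/digest.txt \
    docker://docker.io/library/alpine:latest dir:/tmp/alpine
```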
## EXAMPLES

@@ -148,9 +220,8 @@ skopeo copy --encryption-key jwe:./public.key --encrypt-layer 1 oci:local_nginx
```

## SEE ALSO
skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5), containers-policy.json(5), containers-transports(5)
skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5), containers-policy.json(5), containers-transports(5), containers-signature(5)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>

@@ -4,7 +4,7 @@
skopeo\-delete - Mark the _image-name_ for later deletion by the registry's garbage collector.

## SYNOPSIS
**skopeo delete** _image-name_
**skopeo delete** [*options*] _image-name_

Mark _image-name_ for deletion. To release the allocated disk space, you must login to the container registry server and execute the container registry garbage collector. E.g.,

@@ -19,22 +19,50 @@ $ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distrib

```

## OPTIONS

**--authfile** _path_

Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--creds** _username[:password]_ for accessing the registry.
**--creds** _username[:password]_

**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry.
Credentials for accessing the registry.

**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true).
**--cert-dir** _path_

**--no-creds** _bool-value_ Access the registry anonymously.
Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry.

**--daemon-host** _host_

Use docker daemon host at _host_ (`docker-daemon:` transport only)

**--help**, **-h**

Print usage statement

**--no-creds** _bool-value_

Access the registry anonymously.

Additionally, the registry must allow deletions by setting `REGISTRY_STORAGE_DELETE_ENABLED=true` for the registry daemon.

**--registry-token** _Bearer token_ for accessing the registry.
**--registry-token** _token_

Bearer token for accessing the registry.

**--retry-times**

The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.

**--shared-blob-dir** _directory_

Directory to use to share blobs across OCI repositories.

**--tls-verify**=_bool_

Require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)

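As a sketch of the registry-side requirement mentioned above (registry and image names are placeholders):

```sh
# The registry daemon must be started with deletions enabled ...
docker run -d -p 5000:5000 -e REGISTRY_STORAGE_DELETE_ENABLED=true --name registry registry:2
# ... before skopeo delete can mark an image for garbage collection.
skopeo delete --tls-verify=false docker://localhost:5000/myimage:latest
```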
## EXAMPLES

@@ -51,4 +79,3 @@ skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5)
## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>

@@ -31,11 +31,19 @@ Output configuration in OCI format, default is to format in JSON format.

Username and password for accessing the registry.

**--daemon-host** _host_

Use docker daemon host at _host_ (`docker-daemon:` transport only)

**--format**, **-f**=*format*

Format the output using the given Go template.
The keys of the returned JSON can be used as the values for the --format flag (see examples below).

**--help**, **-h**

Print usage statement

**--no-creds**

Access the registry anonymously.
@@ -53,9 +61,13 @@ Registry token for accessing the registry.

The number of times to retry; retry wait time will be exponentially increased based on the number of failed attempts.

**--tls-verify**
**--shared-blob-dir** _directory_

Require HTTPS and verify certificates when talking to container registries (defaults to true).
Directory to use to share blobs across OCI repositories.

**--tls-verify**=_bool_

Require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)

## EXAMPLES

@@ -1,29 +1,47 @@
% skopeo-list-tags(1)

## NAME
skopeo\-list\-tags - Return a list of tags for the transport-specific image repository.
skopeo\-list\-tags - List tags in the transport-specific image repository.

## SYNOPSIS
**skopeo list-tags** _repository-name_
**skopeo list-tags** [*options*] _repository-name_

Return a list of tags from _repository-name_ in a registry.

_repository-name_ name of repository to retrieve tag listing from

**--authfile** _path_
## OPTIONS

Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
**--authfile** _path_

Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--creds** _username[:password]_ for accessing the registry.
**--creds** _username[:password]_ for accessing the registry.

**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry.
**--cert-dir** _path_

**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true).
Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry.

**--no-creds** _bool-value_ Access the registry anonymously.
**--help**, **-h**

**--registry-token** _Bearer token_ for accessing the registry.
Print usage statement

**--no-creds** _bool-value_

Access the registry anonymously.

**--registry-token** _Bearer token_

Bearer token for accessing the registry.

**--retry-times**

The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.

**--tls-verify**=_bool_

Require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)

## REPOSITORY NAMES

@@ -34,18 +52,18 @@ This commands refers to repositories using a _transport_`:`_details_ format. The
**docker://**_docker-repository-reference_
A repository in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in either `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(skopeo login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.
A _docker-repository-reference_ is of the form: **registryhost:port/repositoryname** which is similar to an _image-reference_ but with no tag or digest allowed as the last component (e.g no `:latest` or `@sha256:xyz`)

Examples of valid docker-repository-references:
"docker.io/myuser/myrepo"
"docker.io/nginx"
"docker.io/library/fedora"
"localhost:5000/myrepository"

Examples of invalid references:
"docker.io/nginx:latest"
"docker.io/myuser/myimage:v1.0"
"docker.io/myuser/myimage@sha256:f48c4cc192f4c3c6a069cb5cca6d0a9e34d6076ba7c214fd0cc3ca60e0af76bb"

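A small sketch tying the valid reference forms above to the command itself (the repository is illustrative; output is a JSON list of tags):

```sh
# List tags for a repository reference without a tag or digest.
skopeo list-tags docker://docker.io/library/fedora
```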
## EXAMPLES

@@ -101,4 +119,3 @@ skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5)
## AUTHORS

Zach Hill <zach@anchore.com>

@@ -4,7 +4,7 @@
skopeo\-login - Login to a container registry.

## SYNOPSIS
**skopeo login** [*options*] *registry*
**skopeo login** [*options*] _registry_

## DESCRIPTION
**skopeo login** logs into a specified registry server with the correct username
@@ -43,16 +43,18 @@ Return the logged-in user for the registry. Return error if no login is found.
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
Default certificates directory is _/etc/containers/certs.d_.

**--tls-verify**=*true|false*

Require HTTPS and verify certificates when contacting registries (default: true). If explicitly set to true,
then TLS verification will be used. If set to false, then TLS verification will not be used. If not specified,
TLS verification will be used unless the target registry is listed as an insecure registry in registries.conf.

**--help**, **-h**

Print usage statement

**--tls-verify**=_bool_

Require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)

**--verbose**, **-v**

Write more detailed information to stdout

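For example, the TLS behaviour described above can be relaxed for a local, plain-HTTP registry (a hypothetical host and user):

```sh
# Log in to an insecure local registry; --tls-verify=false skips certificate verification.
skopeo login --tls-verify=false --username testuser localhost:5000
```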
## EXAMPLES

```

@@ -4,7 +4,7 @@
skopeo\-logout - Logout of a container registry.

## SYNOPSIS
**skopeo logout** [*options*] *registry*
**skopeo logout** [*options*] _registry_

## DESCRIPTION
**skopeo logout** logs out of a specified registry server by deleting the cached credentials
@@ -29,6 +29,10 @@ Remove the cached credentials for all registries in the auth file

Print usage statement

**--tls-verify**=_bool_

Require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)

## EXAMPLES

```

@@ -10,6 +10,12 @@ skopeo\-manifest\-digest - Compute a manifest digest for a manifest-file and wri

Compute a manifest digest of _manifest-file_ and write it to standard output.

## OPTIONS

**--help**, **-h**

Print usage statement

## EXAMPLES

```sh
@@ -23,4 +29,3 @@ skopeo(1)
## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>

@@ -4,11 +4,10 @@
skopeo\-standalone-sign - Debugging tool - Publish and sign an image in one step.

## SYNOPSIS
**skopeo standalone-sign** _manifest docker-reference key-fingerprint_ **--output**|**-o** _signature_
**skopeo standalone-sign** [*options*] _manifest_ _docker-reference_ _key-fingerprint_ **--output**|**-o** _signature_

## DESCRIPTION
This is primarily a debugging tool, or useful for special cases,
and usually should not be a part of your normal operational workflow; use `skopeo copy --sign-by` instead to publish and sign an image in one step.
This is primarily a debugging tool, useful for special cases, and usually should not be a part of your normal operational workflow; use `skopeo copy --sign-by` instead to publish and sign an image in one step.

_manifest_ Path to a file containing the image manifest

@@ -16,7 +15,15 @@ and usually should not be a part of your normal operational workflow; use `skope

_key-fingerprint_ Key identity to use for signing

**--output**|**-o** output file
## OPTIONS

**--help**, **-h**

Print usage statement

**--output**, **-o** _output file_

Write signature to _output file_.

## EXAMPLES

@@ -25,10 +32,13 @@ $ skopeo standalone-sign busybox-manifest.json registry.example.com/example/busy
$
```

## NOTES

This command is intended for use with local signatures e.g. OpenPGP ( other signature formats may be added in the future ), as per containers-signature(5). Furthermore, this command does **not** interact with the artifacts generated by Docker Content Trust (DCT). For more information, please see [containers-signature(5)](https://github.com/containers/image/blob/main/docs/containers-signature.5.md).

## SEE ALSO
skopeo(1), skopeo-copy(1), containers-signature(5)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>

@@ -4,11 +4,13 @@
skopeo\-standalone\-verify - Verify an image signature.

## SYNOPSIS
**skopeo standalone-verify** _manifest docker-reference key-fingerprint signature_
**skopeo standalone-verify** _manifest_ _docker-reference_ _key-fingerprint_ _signature_

## DESCRIPTION

Verify a signature using local files, digest will be printed on success.
Verify a signature using local files; the digest will be printed on success. This is primarily a debugging tool, useful for special cases,
and usually should not be a part of your normal operational workflow. Additionally, consider configuring a signature verification policy file,
as per containers-policy.json(5).

_manifest_ Path to a file containing the image manifest

@@ -20,6 +22,12 @@ Verify a signature using local files, digest will be printed on success.

**Note:** If you do use this, make sure that the image can not be changed at the source location between the times of its verification and use.

## OPTIONS

**--help**, **-h**

Print usage statement

## EXAMPLES

```sh
@@ -27,10 +35,13 @@ $ skopeo standalone-verify busybox-manifest.json registry.example.com/example/bu
Signature verified, digest sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55
```

## NOTES

This command is intended for use with local signatures e.g. OpenPGP ( other signature formats may be added in the future ), as per containers-signature(5). Furthermore, this command does **not** interact with the artifacts generated by Docker Content Trust (DCT). For more information, please see [containers-signature(5)](https://github.com/containers/image/blob/main/docs/containers-signature.5.md).

## SEE ALSO
skopeo(1), containers-signature(5)
skopeo(1), containers-signature(5), containers-policy.json(5)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>

@@ -5,7 +5,7 @@ skopeo\-sync - Synchronize images between container registries and local directo

## SYNOPSIS
**skopeo sync** --src _transport_ --dest _transport_ _source_ _destination_
**skopeo sync** [*options*] --src _transport_ --dest _transport_ _source_ _destination_

## DESCRIPTION
Synchronize images between container registries and local directories.
@@ -32,7 +32,7 @@ When the `--scoped` option is specified, images are prefixed with the source ima
name can be stored at _destination_.

## OPTIONS
**--all**
**--all**, **-a**
If one of the images in __src__ refers to a list of images, instead of copying just the image which matches the current OS and
architecture (subject to the use of the global --override-os, --override-arch and --override-variant options), attempt to copy all of
the images in the list, and the list itself.
@@ -50,17 +50,21 @@ Path of the authentication file for the source registry. Uses path given by `--a

Path of the authentication file for the destination registry. Uses path given by `--authfile`, if not provided.

**--src** _transport_ Transport for the source repository.
**--src**, **-s** _transport_ Transport for the source repository.

**--dest** _transport_ Destination transport.
**--dest**, **-d** _transport_ Destination transport.

**--format, -f** _manifest-type_ Manifest Type (oci, v2s1, or v2s2) to use when syncing image(s) to a destination (default is manifest type of source).
**--format**, **-f** _manifest-type_ Manifest Type (oci, v2s1, or v2s2) to use when syncing image(s) to a destination (default is manifest type of source, with fallbacks).

**--help**, **-h**

Print usage statement.

**--scoped** Prefix images with the source image path, so that multiple images with the same name can be stored at _destination_.

**--remove-signatures** Do not copy signatures, if any, from _source-image_. This is necessary when copying a signed image to a destination which does not support signatures.

**--sign-by=**_key-id_ Add a signature using that key ID for an image name corresponding to _destination-image_.
**--sign-by**=_key-id_ Add a signature using that key ID for an image name corresponding to _destination-image_.

**--src-creds** _username[:password]_ for accessing the source registry.

@@ -82,6 +86,8 @@ Path of the authentication file for the destination registry. Uses path given by

**--dest-registry-token** _Bearer token_ for accessing the destination registry.

**--retry-times** the number of times to retry, retry wait time will be exponentially increased based on the number of failed attempts.

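A brief sketch combining the options above (the repository and directory names are placeholders):

```sh
# Mirror every tag of one repository into a local directory, keeping the source path scoped.
skopeo sync --src docker --dest dir --scoped docker.io/library/busybox /var/tmp/mirror
```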
## EXAMPLES

### Synchronizing to a local directory

@@ -51,42 +51,64 @@ See [containers-transports(5)](https://github.com/containers/image/blob/master/d

## OPTIONS

**--command-timeout** _duration_ Timeout for the command execution.
**--command-timeout** _duration_

**--debug** enable debug output
Timeout for the command execution.

**--help**|**-h** Show help
**--debug**

**--insecure-policy** Adopt an insecure, permissive policy that allows anything. This obviates the need for a policy file.
enable debug output

**--override-arch** _arch_ Use _arch_ instead of the architecture of the machine for choosing images.
**--help**, **-h**

**--override-os** _OS_ Use _OS_ instead of the running OS for choosing images.
Show help

**--override-variant** _VARIANT_ Use _VARIANT_ instead of the running architecture variant for choosing images.
**--insecure-policy**

**--policy** _path-to-policy_ Path to a policy.json file to use for verifying signatures and deciding whether an image is trusted, overriding the default trust policy file.
Adopt an insecure, permissive policy that allows anything. This obviates the need for a policy file.

**--registries.d** _dir_ use registry configuration files in _dir_ (e.g. for container signature storage), overriding the default path.
**--override-arch** _arch_

**--tmpdir** _dir_ used to store temporary files. Defaults to /var/tmp.
Use _arch_ instead of the architecture of the machine for choosing images.

**--version**|**-v** print the version number
**--override-os** _os_

Use _OS_ instead of the running OS for choosing images.

**--override-variant** _variant_

Use _variant_ instead of the running architecture variant for choosing images.

**--policy** _path-to-policy_

Path to a policy.json file to use for verifying signatures and deciding whether an image is trusted, overriding the default trust policy file.

**--registries.d** _dir_

Use registry configuration files in _dir_ (e.g. for container signature storage), overriding the default path.

**--tmpdir** _dir_

Directory used to store temporary files. Defaults to /var/tmp.

**--version**, **-v**

Print the version number

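A short sketch of the override options above (the image name is illustrative):

```sh
# Inspect the arm64/linux variant of a multi-arch image, regardless of the local machine.
skopeo --override-arch arm64 --override-os linux inspect docker://docker.io/library/alpine:latest
```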
## COMMANDS

| Command | Description |
| ----------------------------------------- | ------------------------------------------------------------------------------ |
| [skopeo-copy(1)](skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
| [skopeo-delete(1)](skopeo-delete.1.md) | Mark image-name for deletion. |
| [skopeo-inspect(1)](skopeo-inspect.1.md) | Return low-level information about image-name in a registry. |
| [skopeo-list-tags(1)](skopeo-list-tags.1.md) | List the tags for the given transport/repository. |
| [skopeo-delete(1)](skopeo-delete.1.md) | Mark the _image-name_ for later deletion by the registry's garbage collector. |
| [skopeo-inspect(1)](skopeo-inspect.1.md) | Return low-level information about _image-name_ in a registry. |
| [skopeo-list-tags(1)](skopeo-list-tags.1.md) | List tags in the transport-specific image repository. |
| [skopeo-login(1)](skopeo-login.1.md) | Login to a container registry. |
| [skopeo-logout(1)](skopeo-logout.1.md) | Logout of a container registry. |
| [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest of manifest-file and write it to standard output.|
| [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Sign an image. |
| [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md)| Verify an image. |
| [skopeo-sync(1)](skopeo-sync.1.md)| Copy images from one or more repositories to a user specified destination. |
| [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
| [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
| [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md)| Verify an image signature. |
| [skopeo-sync(1)](skopeo-sync.1.md)| Synchronize images between container registries and local directories. |

## FILES
**/etc/containers/policy.json**

12 go.mod
@@ -3,11 +3,11 @@ module github.com/containers/skopeo
go 1.12

require (
github.com/containers/common v0.38.4
github.com/containers/image/v5 v5.12.0
github.com/containers/ocicrypt v1.1.1
github.com/containers/storage v1.31.1
github.com/docker/docker v20.10.6+incompatible
github.com/containers/common v0.42.0
github.com/containers/image/v5 v5.15.0
github.com/containers/ocicrypt v1.1.2
github.com/containers/storage v1.33.1
github.com/docker/docker v20.10.7+incompatible
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/go-check/check v0.0.0-20180628173108-788fd7840127
github.com/opencontainers/go-digest v1.0.0
@@ -16,7 +16,7 @@ require (
github.com/pkg/errors v0.9.1
github.com/russross/blackfriday v2.0.0+incompatible // indirect
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.1.3
github.com/spf13/cobra v1.2.1
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.7.0
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635

285 go.sum
@@ -10,18 +10,33 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T
|
||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
|
||||
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
|
||||
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
|
||||
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
|
||||
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
|
||||
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
|
||||
@@ -48,8 +63,9 @@ github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB
|
||||
github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
|
||||
github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
|
||||
github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
|
||||
github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w=
|
||||
github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
|
||||
github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU=
|
||||
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
|
||||
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
|
||||
github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
|
||||
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
|
||||
@@ -57,8 +73,8 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3
|
||||
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
|
||||
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
|
||||
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
|
||||
github.com/Microsoft/hcsshim v0.8.17 h1:yFHH5bghP9ij5Y34PPaMOE8g//oXZ0uJQeMENVo2zcI=
|
||||
github.com/Microsoft/hcsshim v0.8.17/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
|
||||
github.com/Microsoft/hcsshim v0.8.20 h1:ZTwcx3NS8n07kPf/JZ1qwU6vnjhVPMUWlXBF8r9UxrE=
|
||||
github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
|
||||
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
|
||||
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
@@ -66,8 +82,8 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
||||
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
|
||||
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
|
||||
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
|
||||
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
|
||||
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
|
||||
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
@@ -75,6 +91,7 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
@@ -88,7 +105,9 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
|
||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||
github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA=
|
||||
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
|
||||
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
|
||||
@@ -111,9 +130,12 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE
|
||||
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
|
||||
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
||||
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
|
||||
github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
|
||||
github.com/cilium/ebpf v0.6.1/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
|
||||
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
|
||||
github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
|
||||
@@ -195,22 +217,24 @@ github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ
|
||||
github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
|
||||
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
|
||||
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
|
||||
github.com/containers/common v0.38.4 h1:WYv4R6Sw1qiOPZtBNbKglrmisXdPcq3fZ3bGy4prrjo=
|
||||
github.com/containers/common v0.38.4/go.mod h1:egfpX/Y3+19Dz4Wa1eRZDdgzoEOeneieF9CQppKzLBg=
|
||||
github.com/containers/image/v5 v5.12.0 h1:1hNS2QkzFQ4lH3GYQLyAXB0acRMhS1Ubm6oV++8vw4w=
|
||||
github.com/containers/image/v5 v5.12.0/go.mod h1:VasTuHmOw+uD0oHCfApQcMO2+36SfyncoSahU7513Xs=
|
||||
github.com/containers/common v0.42.0 h1:x5WgLp0QVCB76qvR3Zpwpj2rb/6BzOlOXzx1uqXReOw=
|
||||
github.com/containers/common v0.42.0/go.mod h1:UzAAjDsxwd4qkN1mgsk6aspduBY5bspxvKgwQElaBwk=
|
||||
github.com/containers/image/v5 v5.14.0/go.mod h1:SxiBKOcKuT+4yTjD0AskjO+UwFvNcVOJ9qlAw1HNSPU=
|
||||
github.com/containers/image/v5 v5.15.0 h1:NduhN20ptHNlf0uRny5iTJa2OodB9SLMEB4hKKbzBBs=
|
||||
github.com/containers/image/v5 v5.15.0/go.mod h1:gzdBcooi6AFdiqfzirUqv90hUyHyI0MMdaqKzACKr2s=
|
||||
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
|
||||
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
|
||||
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
|
||||
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
|
||||
github.com/containers/ocicrypt v1.1.1 h1:prL8l9w3ntVqXvNH1CiNn5ENjcCnr38JqpSyvKKB4GI=
|
||||
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
|
||||
github.com/containers/storage v1.30.1/go.mod h1:NDJkiwxnSHD1Is+4DGcyR3SIEYSDOa0xnAW+uGQFx9E=
|
||||
github.com/containers/storage v1.31.1 h1:xJedxRd4gI/7cCStZO9UVL2aFs4wjSV9Xqo3vAm2eOQ=
|
||||
github.com/containers/storage v1.31.1/go.mod h1:IFEf+yRTS0pvCGQt2tBv1Kzz2XUSPvED6uFBmWG7V/E=
|
||||
github.com/containers/ocicrypt v1.1.2 h1:Ez+GAMP/4GLix5Ywo/fL7O0nY771gsBIigiqUm1aXz0=
|
||||
github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
|
||||
github.com/containers/storage v1.32.6/go.mod h1:mdB+b89p+jU8zpzLTVXA0gWMmIo0WrkfGMh1R8O2IQw=
|
||||
github.com/containers/storage v1.33.0/go.mod h1:FUZPF4nJijX8ixdhByZJXf02cvbyLi6dyDwXdIe8QVY=
|
||||
github.com/containers/storage v1.33.1 h1:RHUPZ7vQxwoeOoMoKUDsVun4f9Wi8BTXmr/wQiruBYU=
|
||||
github.com/containers/storage v1.33.1/go.mod h1:FUZPF4nJijX8ixdhByZJXf02cvbyLi6dyDwXdIe8QVY=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
|
||||
github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
|
||||
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
|
||||
@@ -221,7 +245,7 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||
github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
@@ -234,6 +258,7 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S
|
||||
github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
|
||||
github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
|
||||
github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
|
||||
github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -248,10 +273,10 @@ github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible
|
||||
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v20.10.6+incompatible h1:oXI3Vas8TI8Eu/EjH4srKHJBVqraSzJybhxY7Om9faQ=
|
||||
github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
|
||||
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
|
||||
github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ=
|
||||
github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
|
||||
github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
|
||||
@@ -277,6 +302,9 @@ github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
@@ -337,6 +365,9 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
@@ -352,6 +383,7 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
|
||||
@@ -362,9 +394,11 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
@@ -373,11 +407,19 @@ github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCj
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
@@ -402,6 +444,7 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
|
||||
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
@@ -429,15 +472,17 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
|
||||
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
|
||||
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
|
||||
github.com/jinzhu/copier v0.3.0/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro=
|
||||
github.com/jinzhu/copier v0.3.2/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
@@ -458,14 +503,15 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
|
||||
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8=
|
||||
github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/compress v1.13.1 h1:wXr2uRxZTJXHLly6qhJabee5JqIhTRoLBhDOA74hDEQ=
|
||||
github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
|
||||
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
@@ -477,7 +523,7 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
|
||||
@@ -487,11 +533,11 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg=
|
||||
github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
|
||||
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
|
||||
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
|
||||
github.com/mattn/go-shellwords v1.0.11 h1:vCoR9VPpsk/TZFW2JwK5I9S0xdrtUq2bph6/YjEPnaw=
|
||||
github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
|
||||
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
|
||||
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
@@ -508,6 +554,7 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
|
||||
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
|
||||
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
|
||||
@@ -545,16 +592,16 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
|
||||
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.16.2 h1:HFB2fbVIlhIfCfOW81bZFbiC/RvnpXSdhbF2/DJr134=
|
||||
github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
|
||||
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
|
||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
||||
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
|
||||
github.com/onsi/gomega v1.12.0 h1:p4oGGk2M2UJc0wWN4lHFvIB71lxsh0T/UiKCCgFADY8=
|
||||
github.com/onsi/gomega v1.12.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
|
||||
github.com/onsi/gomega v1.14.0 h1:ep6kpPVwmr/nTbklSx2nrLNSIO62DoYAhnPNIMhK8gI=
|
||||
github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
|
||||
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
@@ -572,8 +619,9 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P
|
||||
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
|
||||
github.com/opencontainers/runc v1.0.0-rc94 h1:atqAFoBGp+Wkh9HKpYN3g/8NCbMzYG6SJrr+YgwamgM=
|
||||
github.com/opencontainers/runc v1.0.0-rc94/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM=
|
||||
github.com/opencontainers/runc v1.0.0/go.mod h1:MU2S3KEB2ZExnhnAQYbwjdYV6HwKtDlNbA2Z2OeNDeA=
|
||||
github.com/opencontainers/runc v1.0.1 h1:G18PGckGdAm3yVQRWDVQ1rLSLntiniKJ0cNRT2Tm5gs=
|
||||
github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
|
||||
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
@@ -585,25 +633,25 @@ github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mo
|
||||
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
|
||||
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
|
||||
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
|
||||
github.com/opencontainers/selinux v1.8.1 h1:yvEZh7CsfnJNwKzG9ZeXwbvR05RAZsu5RS/3vA6qFTA=
|
||||
github.com/opencontainers/selinux v1.8.1/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
|
||||
github.com/opencontainers/selinux v1.8.2 h1:c4ca10UMgRcvZ6h0K4HtS15UaVSBEaE+iln2LVpAuGc=
|
||||
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
|
||||
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw=
|
||||
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
|
||||
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
|
||||
github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
|
||||
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
@@ -637,10 +685,10 @@ github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
|
||||
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday v2.0.0+incompatible h1:cBXrhZNUf9C+La9/YpS+UHpUT8YD6Td9ZMSU9APFcsk=
|
||||
github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
@@ -669,13 +717,16 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
|
||||
github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
|
||||
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
|
||||
github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
|
||||
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
@@ -683,7 +734,7 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I=
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
|
||||
github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
@@ -718,8 +769,8 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
|
||||
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
|
||||
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
|
||||
github.com/vbauerster/mpb/v6 v6.0.3 h1:j+twHHhSUe8aXWaT/27E98G5cSBeqEuJSVCMjmLg0PI=
|
||||
github.com/vbauerster/mpb/v6 v6.0.3/go.mod h1:5luBx4rDLWxpA4t6I5sdeeQuZhqDxc+wr5Nqf35+tnM=
|
||||
github.com/vbauerster/mpb/v7 v7.0.3 h1:NfX0pHWhlDTev15M/C3qmSTM1EiIjcS+/d6qS6H4FnI=
|
||||
github.com/vbauerster/mpb/v7 v7.0.3/go.mod h1:NXGsfPGx6G2JssqvEcULtDqUrxuuYs4llpv8W6ZUpzk=
|
||||
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
|
||||
@@ -727,7 +778,6 @@ github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmF
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
||||
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||
github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE=
|
||||
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b h1:6cLsL+2FW6dRAdl5iMtHgRogVCff0QpRi9653YmdcJA=
|
||||
@@ -739,27 +789,40 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
|
||||
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
|
||||
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
|
||||
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
|
||||
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
|
||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
|
||||
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
||||
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
|
||||
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
|
||||
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
|
||||
go4.org v0.0.0-20190218023631-ce4c26f7be8e h1:m9LfARr2VIOW0vsV19kEKp/sWQvZnGobA8JHui/XJoY=
|
||||
go4.org v0.0.0-20190218023631-ce4c26f7be8e/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
@@ -771,6 +834,7 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
@@ -799,6 +863,8 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
@@ -807,6 +873,9 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -826,6 +895,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
@@ -837,13 +907,24 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
|
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
@@ -851,16 +932,25 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -908,26 +998,44 @@ golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs=
|
||||
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -936,6 +1044,7 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -979,10 +1088,27 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
|
||||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -998,12 +1124,26 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
|
||||
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
|
||||
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
|
||||
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
|
||||
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
|
||||
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
@@ -1024,10 +1164,32 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx
|
||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0=
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
@@ -1040,9 +1202,19 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
|
||||
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@@ -1068,7 +1240,7 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
@@ -1080,14 +1252,16 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
@@ -1099,6 +1273,7 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
|
||||
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
|
||||
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
|
||||
|
||||
@@ -5,7 +5,7 @@ if [ -z "$VALIDATE_UPSTREAM" ]; then
	# are running more than one validate bundlescript

	VALIDATE_REPO='https://github.com/containers/skopeo.git'
	VALIDATE_BRANCH='master'
	VALIDATE_BRANCH='main'

	if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
		VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git"
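For context, VALIDATE_UPSTREAM (referenced in the hunk header above) is how the validate helper names the upstream commit it diffs against. A minimal, hypothetical sketch of how that commit is usually resolved; only VALIDATE_REPO and VALIDATE_BRANCH are taken from the hunk above, the rest follows the common hack/validate pattern and may differ from the actual script:

```bash
# Hypothetical sketch: fetch the upstream validation branch and record its commit.
# VALIDATE_REPO / VALIDATE_BRANCH come from the snippet above; VALIDATE_UPSTREAM
# is assumed to be the variable the rest of the validate script consumes.
git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH"
VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)"
```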
150  hack/man-page-checker  Executable file
@@ -0,0 +1,150 @@
#!/usr/bin/env bash
#
# man-page-checker - validate and cross-reference man page names
#
# This is the script that cross-checks BETWEEN MAN PAGES. It is not the
# script that cross-checks that each option in skopeo foo --help is listed
# in skopeo-foo.1.md and vice-versa; that one is xref-helpmsgs-manpages.
#

verbose=
for i; do
    case "$i" in
        -v|--verbose) verbose=verbose ;;
    esac
done


die() {
    echo "$(basename $0): $*" >&2
    exit 1
}

cd $(dirname $0)/../docs || die "Please run me from top-level skopeo dir"

rc=0

# Pass 1: cross-check file names with NAME section
#
# for a given skopeo-foo.1.md, the NAME should be 'skopeo-foo'
for md in *.1.md;do
    # Read the first line after '## NAME'
    name=$(egrep -A1 '^## NAME' $md|tail -1|awk '{print $1}' | tr -d \\\\)

    expect=$(basename $md .1.md)
    if [ "$name" != "$expect" ]; then
        echo
        printf "Inconsistent program NAME in %s:\n" $md
        printf " NAME= %s (expected: %s)\n" $name $expect
        rc=1
    fi
done

# Pass 2: compare descriptions.
#
# Make sure the descriptive text in skopeo-foo.1.md matches the one
# in the table in skopeo.1.md.
for md in $(ls -1 *-*.1.md);do
    desc=$(egrep -A1 '^## NAME' $md|tail -1|sed -E -e 's/^skopeo[^[:space:]]+ - //')

    # Find the descriptive text in the main skopeo man page.
    parent=skopeo.1.md
    parent_desc=$(grep $md $parent | awk -F'|' '{print $3}' | sed -E -e 's/^[[:space:]]+//' -e 's/[[:space:]]+$//')

    if [ "$desc" != "$parent_desc" ]; then
        echo
        printf "Inconsistent subcommand descriptions:\n"
        printf " %-32s = '%s'\n" $md "$desc"
        printf " %-32s = '%s'\n" $parent "$parent_desc"
        printf "Please ensure that the NAME section of $md\n"
        printf "matches the subcommand description in $parent\n"
        rc=1
    fi
done

# Helper function: compares man page synopsis vs --help usage message
function compare_usage() {
    local cmd="$1"
    local from_man="$2"

    # Run 'cmd --help', grab the line immediately after 'Usage:'
    local help_output=$(../bin/$cmd --help)
    local from_help=$(echo "$help_output" | grep -A1 '^Usage:' | tail -1)

    # strip off command name from both
    from_man=$(sed -E -e "s/\*\*$cmd\*\*[[:space:]]*//" <<<"$from_man")
    from_help=$(sed -E -e "s/^[[:space:]]*$cmd[[:space:]]*//" <<<"$from_help")

    # man page lists 'foo [*options*]', help msg shows 'foo [command options]'.
    # Make sure if one has it, the other does too.
    if expr "$from_man" : "\[\*options\*\]" >/dev/null; then
        if expr "$from_help" : "\[command options\]" >/dev/null; then
            :
        else
            echo "WARNING: $cmd: man page shows '[*options*]', help does not show [command options]"
            rc=1
        fi
    elif expr "$from_help" : "\[command options\]" >/dev/null; then
        echo "WARNING: $cmd: --help shows [command options], man page does not show [*options*]"
        rc=1
    fi

    # Strip off options and flags; start comparing arguments
    from_man=$(sed -E -e 's/^\[\*options\*\][[:space:]]*//' <<<"$from_man")
    from_help=$(sed -E -e 's/^\[command options\][[:space:]]*//' <<<"$from_help")

    # Constant strings in man page are '**foo**', in --help are 'foo'.
    from_man=$(sed -E -e 's/\*\*([^*]+)\*\*/\1/g' <<<"$from_man")

    # Args in man page are '_foo_', in --help are 'FOO'. Convert all to
    # UPCASE simply because it stands out better to the eye.
    from_man=$(sed -E -e 's/_([a-z-]+)_/\U\1/g' <<<"$from_man")

    # Compare man-page and --help usage strings. Skip 'skopeo' itself,
    # because the man page includes '[global options]' which we don't grok.
    if [[ "$from_man" != "$from_help" && "$cmd" != "skopeo" ]]; then
        printf "%-25s man='%s' help='%s'\n" "$cmd:" "$from_man" "$from_help"
        rc=1
    fi
}

# Pass 3: compare synopses.
#
# Make sure the SYNOPSIS line in skopeo-foo.1.md reads '**skopeo foo** ...'
for md in *.1.md;do
    synopsis=$(egrep -A1 '^#* SYNOPSIS' $md|tail -1)

    # Command name must be bracketed by double asterisks; options and
    # arguments are bracketed by single ones.
    # E.g. '**skopeo copy** [*options*] _..._'
    # Get the command name, and confirm that it matches the md file name.
    cmd=$(echo "$synopsis" | sed -E -e 's/^\*\*([^*]+)\*\*.*/\1/' | tr -d \*)
    # Use sed, not tr, so we only replace the first dash: we want
    # skopeo-list-tags -> "skopeo list-tags", not "skopeo list tags"
    md_nodash=$(basename "$md" .1.md | sed -e 's/-/ /')
    if [ "$cmd" != "$md_nodash" ]; then
        echo
        printf "Inconsistent program name in SYNOPSIS in %s:\n" $md
        printf " SYNOPSIS = %s (expected: '%s')\n" "$cmd" "$md_nodash"
        rc=1
    fi

    # The convention is to use UPPER CASE in 'skopeo foo --help',
    # but *lower case bracketed by asterisks* in the man page
    if expr "$synopsis" : ".*[A-Z]" >/dev/null; then
        echo
        printf "Inconsistent capitalization in SYNOPSIS in %s\n" $md
        printf " '%s' should not contain upper-case characters\n" "$synopsis"
        rc=1
    fi

    # (for debugging, and getting a sense of standard conventions)
    #printf " %-32s ------ '%s'\n" $md "$synopsis"

    # If bin/skopeo is available, run "cmd --help" and compare Usage
    # messages. This is complicated, so do it in a helper function.
    compare_usage "$md_nodash" "$synopsis"
done


exit $rc
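A usage sketch for the new checker, assuming a skopeo checkout with the docs/*.1.md layout the script expects; `make bin/skopeo` is the build step from the project's build docs and is only needed so that pass 3 can compare SYNOPSIS lines against `--help` output:

```bash
# Build the binary (optional, used by pass 3), then run the checker.
make bin/skopeo
./hack/man-page-checker
echo $?    # 0 when all man pages are consistent, 1 otherwise
```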
277  hack/xref-helpmsgs-manpages  Executable file
@@ -0,0 +1,277 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
# xref-helpmsgs-manpages - cross-reference --help options against man pages
|
||||
#
|
||||
package LibPod::CI::XrefHelpmsgsManpages;
|
||||
|
||||
use v5.14;
|
||||
use utf8;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
(our $ME = $0) =~ s|.*/||;
|
||||
our $VERSION = '0.1';
|
||||
|
||||
# For debugging, show data structures using DumpTree($var)
|
||||
#use Data::TreeDumper; $Data::TreeDumper::Displayaddress = 0;
|
||||
|
||||
# unbuffer output
|
||||
$| = 1;
|
||||
|
||||
###############################################################################
|
||||
# BEGIN user-customizable section
|
||||
|
||||
# Path to skopeo executable
|
||||
my $Default_Skopeo = './bin/skopeo';
|
||||
my $SKOPEO = $ENV{SKOPEO} || $Default_Skopeo;
|
||||
|
||||
# Path to all doc files (markdown)
|
||||
my $Docs_Path = 'docs';
|
||||
|
||||
# Global error count
|
||||
my $Errs = 0;
|
||||
|
||||
# END user-customizable section
|
||||
###############################################################################
|
||||
|
||||
###############################################################################
|
||||
# BEGIN boilerplate args checking, usage messages
|
||||
|
||||
sub usage {
|
||||
print <<"END_USAGE";
|
||||
Usage: $ME [OPTIONS]
|
||||
|
||||
$ME recursively runs 'skopeo --help' against
|
||||
all subcommands; and recursively reads skopeo-*.1.md files
|
||||
in $Docs_Path, then cross-references that each --help
|
||||
option is listed in the appropriate man page and vice-versa.
|
||||
|
||||
$ME invokes '\$SKOPEO' (default: $Default_Skopeo).
|
||||
|
||||
Exit status is zero if no inconsistencies found, one otherwise
|
||||
|
||||
OPTIONS:
|
||||
|
||||
-v, --verbose show verbose progress indicators
|
||||
-n, --dry-run make no actual changes
|
||||
|
||||
--help display this message
|
||||
--version display program name and version
|
||||
END_USAGE
|
||||
|
||||
exit;
|
||||
}
|
||||
|
||||
# Command-line options. Note that this operates directly on @ARGV !
|
||||
our $debug = 0;
|
||||
our $verbose = 0;
|
||||
sub handle_opts {
|
||||
use Getopt::Long;
|
||||
GetOptions(
|
||||
'debug!' => \$debug,
|
||||
'verbose|v' => \$verbose,
|
||||
|
||||
help => \&usage,
|
||||
version => sub { print "$ME version $VERSION\n"; exit 0 },
|
||||
) or die "Try `$ME --help' for help\n";
|
||||
}
|
||||
|
||||
# END boilerplate args checking, usage messages
|
||||
###############################################################################
|
||||
|
||||
############################## CODE BEGINS HERE ###############################
|
||||
|
||||
# The term is "modulino".
|
||||
__PACKAGE__->main() unless caller();
|
||||
|
||||
# Main code.
|
||||
sub main {
|
||||
# Note that we operate directly on @ARGV, not on function parameters.
|
||||
# This is deliberate: it's because Getopt::Long only operates on @ARGV
|
||||
# and there's no clean way to make it use @_.
|
||||
handle_opts(); # will set package globals
|
||||
|
||||
# Fetch command-line arguments. Barf if too many.
|
||||
die "$ME: Too many arguments; try $ME --help\n" if @ARGV;
|
||||
|
||||
my $help = skopeo_help();
|
||||
my $man = skopeo_man('skopeo');
|
||||
|
||||
xref_by_help($help, $man);
|
||||
xref_by_man($help, $man);
|
||||
|
||||
exit !!$Errs;
|
||||
}
|
||||
|
||||
###############################################################################
|
||||
# BEGIN cross-referencing
|
||||
|
||||
##################
|
||||
# xref_by_help # Find keys in '--help' but not in man
|
||||
##################
|
||||
sub xref_by_help {
|
||||
my ($help, $man, @subcommand) = @_;
|
||||
|
||||
for my $k (sort keys %$help) {
|
||||
if (exists $man->{$k}) {
|
||||
if (ref $help->{$k}) {
|
||||
xref_by_help($help->{$k}, $man->{$k}, @subcommand, $k);
|
||||
}
|
||||
# Otherwise, non-ref is leaf node such as a --option
|
||||
}
|
||||
else {
|
||||
my $man = $man->{_path} || 'man';
|
||||
warn "$ME: skopeo @subcommand --help lists $k, but $k not in $man\n";
|
||||
++$Errs;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#################
|
||||
# xref_by_man # Find keys in man pages but not in --help
|
||||
#################
|
||||
#
|
||||
# In an ideal world we could share the functionality in one function; but
|
||||
# there are just too many special cases in man pages.
|
||||
#
|
||||
sub xref_by_man {
|
||||
my ($help, $man, @subcommand) = @_;
|
||||
|
||||
# FIXME: this generates way too much output
|
||||
for my $k (grep { $_ ne '_path' } sort keys %$man) {
|
||||
if (exists $help->{$k}) {
|
||||
if (ref $man->{$k}) {
|
||||
xref_by_man($help->{$k}, $man->{$k}, @subcommand, $k);
|
||||
}
|
||||
}
|
||||
elsif ($k ne '--help' && $k ne '-h') {
|
||||
my $man = $man->{_path} || 'man';
|
||||
|
||||
warn "$ME: skopeo @subcommand: $k in $man, but not --help\n";
|
||||
++$Errs;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# END cross-referencing
|
||||
###############################################################################
|
||||
# BEGIN data gathering
|
||||
|
||||
#################
|
||||
# skopeo_help # Parse output of 'skopeo [subcommand] --help'
|
||||
#################
|
||||
sub skopeo_help {
|
||||
my %help;
|
||||
open my $fh, '-|', $SKOPEO, @_, '--help'
|
||||
or die "$ME: Cannot fork: $!\n";
|
||||
my $section = '';
|
||||
while (my $line = <$fh>) {
|
||||
# Cobra is blessedly consistent in its output:
|
||||
# Usage: ...
|
||||
# Available Commands:
|
||||
# ....
|
||||
# Options:
|
||||
# ....
|
||||
#
|
||||
# Start by identifying the section we're in...
|
||||
if ($line =~ /^Available\s+(Commands):/) {
|
||||
$section = lc $1;
|
||||
}
|
||||
elsif ($line =~ /^(Flags):/) {
|
||||
$section = lc $1;
|
||||
}
|
||||
|
||||
# ...then track commands and options. For subcommands, recurse.
|
||||
elsif ($section eq 'commands') {
|
||||
if ($line =~ /^\s{1,4}(\S+)\s/) {
|
||||
my $subcommand = $1;
|
||||
print "> skopeo @_ $subcommand\n" if $debug;
|
||||
$help{$subcommand} = skopeo_help(@_, $subcommand)
|
||||
unless $subcommand eq 'help'; # 'help' not in man
|
||||
}
|
||||
}
|
||||
elsif ($section eq 'flags') {
|
||||
# Handle '--foo' or '-f, --foo'
|
||||
if ($line =~ /^\s{1,10}(--\S+)\s/) {
|
||||
print "> skopeo @_ $1\n" if $debug;
|
||||
$help{$1} = 1;
|
||||
}
|
||||
elsif ($line =~ /^\s{1,10}(-\S),\s+(--\S+)\s/) {
|
||||
print "> skopeo @_ $1, $2\n" if $debug;
|
||||
$help{$1} = $help{$2} = 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
close $fh
|
||||
or die "$ME: Error running 'skopeo @_ --help'\n";
|
||||
|
||||
return \%help;
|
||||
}
|
||||
|
||||
|
||||
################
|
||||
# skopeo_man # Parse contents of skopeo-*.1.md
|
||||
################
|
||||
sub skopeo_man {
|
||||
my $command = shift;
|
||||
my $manpath = "$Docs_Path/$command.1.md";
|
||||
print "** $manpath \n" if $debug;
|
||||
|
||||
my %man = (_path => $manpath);
|
||||
open my $fh, '<', $manpath
|
||||
or die "$ME: Cannot read $manpath: $!\n";
|
||||
my $section = '';
|
||||
my @most_recent_flags;
|
||||
my $previous_subcmd = '';
|
||||
while (my $line = <$fh>) {
|
||||
chomp $line;
|
||||
next unless $line; # skip empty lines
|
||||
|
||||
# .md files designate sections with leading double hash
|
||||
if ($line =~ /^##\s*OPTIONS/) {
|
||||
$section = 'flags';
|
||||
}
|
||||
elsif ($line =~ /^\#\#\s+(SUB)?COMMANDS/) {
|
||||
$section = 'commands';
|
||||
}
|
||||
elsif ($line =~ /^\#\#[^#]/) {
|
||||
$section = '';
|
||||
}
|
||||
|
||||
# This will be a table containing subcommand names, links to man pages.
|
||||
elsif ($section eq 'commands') {
|
||||
# In skopeo.1.md
|
||||
if ($line =~ /^\|\s*\[skopeo-(\S+?)\(\d\)\]/) {
|
||||
# $1 will be changed by recursion _*BEFORE*_ left-hand assignment
|
||||
my $subcmd = $1;
|
||||
$man{$subcmd} = skopeo_man("skopeo-$1");
|
||||
}
|
||||
}
|
||||
|
||||
# Options should always be of the form '**-f**' or '**\-\-flag**',
|
||||
# possibly separated by comma-space.
|
||||
elsif ($section eq 'flags') {
|
||||
# If option has long and short form, long must come first.
|
||||
# This is a while-loop because there may be multiple long
|
||||
# option names (not in skopeo ATM, but leave the possibility open)
|
||||
while ($line =~ s/^\*\*(--[a-z0-9.-]+)\*\*(=\*[a-zA-Z0-9-]+\*)?(,\s+)?//g) {
|
||||
$man{$1} = 1;
|
||||
}
|
||||
# Short form
|
||||
if ($line =~ s/^\*\*(-[a-zA-Z0-9.])\*\*(=\*[a-zA-Z0-9-]+\*)?//g) {
|
||||
$man{$1} = 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
close $fh;
|
||||
|
||||
return \%man;
|
||||
}
|
||||
|
||||
|
||||
|
||||
# END data gathering
|
||||
###############################################################################
|
||||
|
||||
1;
|
||||
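A matching usage sketch for the --help/man-page cross-referencer; the ./bin/skopeo default and the SKOPEO override come from the script above, and the make target is the one used in the build docs:

```bash
# Cross-reference `skopeo ... --help` output against docs/skopeo-*.1.md.
make bin/skopeo
./hack/xref-helpmsgs-manpages                               # uses ./bin/skopeo by default
SKOPEO=/usr/local/bin/skopeo ./hack/xref-helpmsgs-manpages  # or point it at another binary
./hack/xref-helpmsgs-manpages --help                        # usage, --verbose, --version
```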
@@ -146,7 +146,7 @@ sudo dnf install gpgme-devel libassuan-devel btrfs-progs-devel device-mapper-dev
|
||||
|
||||
```bash
|
||||
# Ubuntu (`libbtrfs-dev` requires Ubuntu 18.10 and above):
|
||||
sudo apt install libgpgme-dev libassuan-dev libbtrfs-dev libdevmapper-dev
|
||||
sudo apt install libgpgme-dev libassuan-dev libbtrfs-dev libdevmapper-dev pkg-config
|
||||
```
|
||||
|
||||
```bash
|
||||
@@ -182,6 +182,11 @@ sudo apt-get install go-md2man
|
||||
sudo dnf install go-md2man
|
||||
```
|
||||
|
||||
```
|
||||
# MacOS:
|
||||
brew install go-md2man
|
||||
```
|
||||
|
||||
Then
|
||||
|
||||
```bash
|
||||
|
||||
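The go-md2man package installed above is what converts the markdown man-page sources into roff. A minimal, hedged example of that conversion: the -in/-out flags are go-md2man's own, the docs/ path matches the layout used by the checker scripts, and in the repo this step is normally driven by the Makefile rather than run by hand.

```bash
# Convert one markdown man-page source into a section-1 man page and preview it.
go-md2man -in docs/skopeo-copy.1.md -out skopeo-copy.1
man ./skopeo-copy.1
```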
@@ -101,7 +101,7 @@ func (s *SkopeoSuite) TestCopyWithLocalAuth(c *check.C) {
|
||||
assertSkopeoSucceeds(c, wanted, "login", "--tls-verify=false", "--username="+s.regV2WithAuth.username, "--password="+s.regV2WithAuth.password, s.regV2WithAuth.url)
|
||||
// copy to private registry using local authentication
|
||||
imageName := fmt.Sprintf("docker://%s/busybox:mine", s.regV2WithAuth.url)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://docker.io/library/busybox:latest", imageName)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", testFQIN+":latest", imageName)
|
||||
// inspect from private registry
|
||||
assertSkopeoSucceeds(c, "", "inspect", "--tls-verify=false", imageName)
|
||||
// logout from the registry
|
||||
|
||||
@@ -318,8 +318,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesBothUseLi
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
i2 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
var image2 imgspecv1.Image
|
||||
err = json.Unmarshal([]byte(i2), &image2)
|
||||
@@ -354,8 +354,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesFirstUses
|
||||
err = json.Unmarshal([]byte(i2), &image2)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(image2.Architecture, check.Equals, "amd64")
|
||||
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=arm64", "inspect", "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=arm64", "inspect", "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
i3 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+arm64Instance.String())
|
||||
var image3 imgspecv1.Image
|
||||
err = json.Unmarshal([]byte(i3), &image3)
|
||||
@@ -385,8 +385,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesSecondUse
|
||||
err = json.Unmarshal([]byte(i1), &image1)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(image1.Architecture, check.Equals, "amd64")
|
||||
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
i2 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
var image2 imgspecv1.Image
|
||||
err = json.Unmarshal([]byte(i2), &image2)
|
||||
@@ -417,7 +417,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesThirdUses
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String())
|
||||
var image1 imgspecv1.Image
|
||||
err = json.Unmarshal([]byte(i1), &image1)
|
||||
@@ -452,7 +452,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesTagAndDig
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage, "containers-storage:"+storage+"test:latest")
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test:latest")
|
||||
var image1 imgspecv1.Image
|
||||
err = json.Unmarshal([]byte(i1), &image1)
|
||||
@@ -506,12 +506,12 @@ func (s *CopySuite) TestCopySimpleAtomicRegistry(c *check.C) {
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
// "pull": docker: → dir:
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:amd64", "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", testFQIN64, "dir:"+dir1)
|
||||
// "push": dir: → atomic:
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, "atomic:localhost:5000/myns/unsigned:unsigned")
|
||||
// The result of pushing and pulling is an equivalent image, except for schema1 embedded names.
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/unsigned:unsigned", "dir:"+dir2)
|
||||
assertSchema1DirImagesAreEqualExceptNames(c, dir1, "estesp/busybox:amd64", dir2, "myns/unsigned:unsigned")
|
||||
assertSchema1DirImagesAreEqualExceptNames(c, dir1, "libpod/busybox:amd64", dir2, "myns/unsigned:unsigned")
|
||||
}
|
||||
|
||||
// The most basic (skopeo copy) use:
|
||||
@@ -568,6 +568,7 @@ func (s *CopySuite) TestCopyEncryption(c *check.C) {
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(keysDir)
|
||||
undecryptedImgDir, err := ioutil.TempDir("", "copy-5")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(undecryptedImgDir)
|
||||
multiLayerImageDir, err := ioutil.TempDir("", "copy-6")
|
||||
c.Assert(err, check.IsNil)
|
||||
@@ -602,7 +603,7 @@ func (s *CopySuite) TestCopyEncryption(c *check.C) {
|
||||
"oci:"+encryptedImgDir+":encrypted", "oci:"+decryptedImgDir+":decrypted")
|
||||
|
||||
// Copy a standard busybox image locally
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://busybox:1.31.1", "oci:"+originalImageDir+":latest")
|
||||
assertSkopeoSucceeds(c, "", "copy", testFQIN+":1.30.1", "oci:"+originalImageDir+":latest")
|
||||
|
||||
// Encrypt the image
|
||||
assertSkopeoSucceeds(c, "", "copy", "--encryption-key",
|
||||
@@ -633,7 +634,7 @@ func (s *CopySuite) TestCopyEncryption(c *check.C) {
|
||||
matchLayerBlobBinaryType(c, decryptedImgDir+"/blobs/sha256", "application/x-gzip", 1)
|
||||
|
||||
// Copy a standard multi layer nginx image locally
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://nginx:1.17.8", "oci:"+multiLayerImageDir+":latest")
|
||||
assertSkopeoSucceeds(c, "", "copy", testFQINMultiLayer, "oci:"+multiLayerImageDir+":latest")
|
||||
|
||||
// Partially encrypt the image
|
||||
assertSkopeoSucceeds(c, "", "copy", "--encryption-key", "jwe:"+keysDir+"/public.key",
|
||||
@@ -738,11 +739,11 @@ func (s *CopySuite) TestCopyStreaming(c *check.C) {
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
// streaming: docker: → atomic:
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "docker://estesp/busybox:amd64", "atomic:localhost:5000/myns/unsigned:streaming")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", testFQIN64, "atomic:localhost:5000/myns/unsigned:streaming")
|
||||
// Compare (copies of) the original and the copy:
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:amd64", "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", testFQIN64, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/unsigned:streaming", "dir:"+dir2)
|
||||
assertSchema1DirImagesAreEqualExceptNames(c, dir1, "estesp/busybox:amd64", dir2, "myns/unsigned:streaming")
|
||||
assertSchema1DirImagesAreEqualExceptNames(c, dir1, "libpod/busybox:amd64", dir2, "myns/unsigned:streaming")
|
||||
// FIXME: Also check pushing to docker://
|
||||
}
|
||||
|
||||
@@ -762,7 +763,7 @@ func (s *CopySuite) TestCopyOCIRoundTrip(c *check.C) {
|
||||
defer os.RemoveAll(oci2)
|
||||
|
||||
// Docker -> OCI
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "docker://busybox", "oci:"+oci1+":latest")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", testFQIN, "oci:"+oci1+":latest")
|
||||
// OCI -> Docker
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "oci:"+oci1+":latest", ourRegistry+"original/busybox:oci_copy")
|
||||
// Docker -> OCI
|
||||
@@ -813,16 +814,16 @@ func (s *CopySuite) TestCopySignatures(c *check.C) {
|
||||
defer os.Remove(policy)
|
||||
|
||||
// type: reject
|
||||
assertSkopeoFails(c, ".*Source image rejected: Running image docker://busybox:latest is rejected by policy.*",
|
||||
"--policy", policy, "copy", "docker://busybox:latest", dirDest)
|
||||
assertSkopeoFails(c, fmt.Sprintf(".*Source image rejected: Running image %s:latest is rejected by policy.*", testFQIN),
|
||||
"--policy", policy, "copy", testFQIN+":latest", dirDest)
|
||||
|
||||
// type: insecureAcceptAnything
|
||||
assertSkopeoSucceeds(c, "", "--policy", policy, "copy", "docker://quay.io/openshift/origin-hello-openshift", dirDest)
|
||||
|
||||
// type: signedBy
|
||||
// Sign the images
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "personal@example.com", "docker://busybox:1.26", "atomic:localhost:5006/myns/personal:personal")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "official@example.com", "docker://busybox:1.26.1", "atomic:localhost:5006/myns/official:official")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "personal@example.com", testFQIN+":1.26", "atomic:localhost:5006/myns/personal:personal")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "official@example.com", testFQIN+":1.26.1", "atomic:localhost:5006/myns/official:official")
|
||||
// Verify that we can pull them
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/personal:personal", dirDest)
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/official:official", dirDest)
|
||||
@@ -876,8 +877,8 @@ func (s *CopySuite) TestCopyDirSignatures(c *check.C) {
|
||||
defer os.Remove(policy)
|
||||
|
||||
// Get some images.
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:armfh", topDirDest+"/dir1")
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:s390x", topDirDest+"/dir2")
|
||||
assertSkopeoSucceeds(c, "", "copy", testFQIN+":armfh", topDirDest+"/dir1")
|
||||
assertSkopeoSucceeds(c, "", "copy", testFQIN+":s390x", topDirDest+"/dir2")
|
||||
|
||||
// Sign the images. By copying from a topDirDest/dirN, also test that non-/restricted paths
|
||||
// use the dir:"" default of insecureAcceptAnything.
|
||||
@@ -993,7 +994,7 @@ func (s *CopySuite) TestCopyDockerSigstore(c *check.C) {
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// Get an image to work with. Also verifies that we can use Docker repositories with no sigstore configured.
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "docker://busybox", ourRegistry+"original/busybox")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", testFQIN, ourRegistry+"original/busybox")
|
||||
// Pulling an unsigned image fails.
|
||||
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
|
||||
"--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"original/busybox", dirDest)
|
||||
@@ -1047,7 +1048,7 @@ func (s *CopySuite) TestCopyAtomicExtension(c *check.C) {
|
||||
defer os.Remove(policy)
|
||||
|
||||
// Get an image to work with to an atomic: destination. Also verifies that we can use Docker repositories without X-Registry-Supports-Signatures
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "docker://busybox", "atomic:localhost:5000/myns/extension:unsigned")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", testFQIN, "atomic:localhost:5000/myns/extension:unsigned")
|
||||
// Pulling an unsigned image using atomic: fails.
|
||||
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
|
||||
"--tls-verify=false", "--policy", policy,
|
||||
@@ -1071,7 +1072,7 @@ func (s *CopySuite) TestCopyAtomicExtension(c *check.C) {
|
||||
|
||||
// Get another image (different so that they don't share signatures, and sign it using docker://)
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir,
|
||||
"copy", "--sign-by", "personal@example.com", "docker://estesp/busybox:ppc64le", "docker://localhost:5000/myns/extension:extension")
|
||||
"copy", "--sign-by", "personal@example.com", testFQIN+":ppc64le", "docker://localhost:5000/myns/extension:extension")
|
||||
c.Logf("%s", combinedOutputOfCommand(c, "oc", "get", "istag", "extension:extension", "-o", "json"))
|
||||
// Pulling the image using atomic: succeeds.
|
||||
assertSkopeoSucceeds(c, "", "--debug", "--tls-verify=false", "--policy", policy,
|
||||
@@ -1092,12 +1093,10 @@ func copyWithSignedIdentity(c *check.C, src, dest, signedIdentity, signBy, regis
|
||||
|
||||
signingDir := filepath.Join(topDir, "signing-temp")
|
||||
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", src, "dir:"+signingDir)
|
||||
// Unknown error in Travis: https://github.com/containers/skopeo/issues/1093
|
||||
// c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
|
||||
c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
|
||||
assertSkopeoSucceeds(c, "^$", "standalone-sign", "-o", filepath.Join(signingDir, "signature-1"),
|
||||
filepath.Join(signingDir, "manifest.json"), signedIdentity, signBy)
|
||||
// Unknown error in Travis: https://github.com/containers/skopeo/issues/1093
|
||||
// c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
|
||||
c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
|
||||
assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--dest-tls-verify=false", "dir:"+signingDir, dest)
|
||||
}
|
||||
|
||||
@@ -1127,7 +1126,7 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
|
||||
// So, make sure to never create a signature that could be considered valid in a different part of the test (i.e. don't reuse tags).
|
||||
|
||||
// Get an image to work with.
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://busybox", regPrefix+"primary:unsigned")
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", testFQIN, regPrefix+"primary:unsigned")
|
||||
// Verify that unsigned images are rejected
|
||||
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
|
||||
"--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:unsigned", dirDest)
|
||||
@@ -1170,11 +1169,11 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
|
||||
assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest)
|
||||
// To be extra clear about the semantics, verify that the signedPrefix (primary) location never exists
|
||||
// and only the remapped prefix (mirror) is accessed.
|
||||
assertSkopeoFails(c, ".*Error initializing source docker://localhost:5006/myns/mirroring-primary:remapped:.*manifest unknown: manifest unknown.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:remapped", dirDest)
|
||||
assertSkopeoFails(c, ".*initializing source docker://localhost:5006/myns/mirroring-primary:remapped:.*manifest unknown: manifest unknown.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:remapped", dirDest)
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestCopySrcWithAuth(c *check.C) {
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", "docker://busybox", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", testFQIN, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
dir1, err := ioutil.TempDir("", "copy-1")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir1)
|
||||
@@ -1182,11 +1181,11 @@ func (s *SkopeoSuite) TestCopySrcWithAuth(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestCopyDestWithAuth(c *check.C) {
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", "docker://busybox", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", testFQIN, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestCopySrcAndDestWithAuth(c *check.C) {
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", "docker://busybox", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", testFQIN, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--src-creds=testuser:testpassword", "--dest-creds=testuser:testpassword", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), fmt.Sprintf("docker://%s/test:auth", s.regV2WithAuth.url))
|
||||
}
|
||||
|
||||
@@ -1216,7 +1215,7 @@ func (s *CopySuite) TestCopyManifestConversion(c *check.C) {
|
||||
|
||||
// oci to v2s1 and vice-versa not supported yet
|
||||
// get v2s2 manifest type
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://busybox", "dir:"+srcDir)
|
||||
assertSkopeoSucceeds(c, "", "copy", testFQIN, "dir:"+srcDir)
|
||||
verifyManifestMIMEType(c, srcDir, manifest.DockerV2Schema2MediaType)
|
||||
// convert from v2s2 to oci
|
||||
assertSkopeoSucceeds(c, "", "copy", "--format=oci", "dir:"+srcDir, "dir:"+destDir1)
|
||||
@@ -1246,7 +1245,7 @@ func (s *CopySuite) testCopySchemaConversionRegistries(c *check.C, schema1Regist
|
||||
|
||||
// Ensure we are working with a schema2 image.
|
||||
// dir: accepts any manifest format, i.e. this makes …/input2 a schema2 source which cannot be asked to produce schema1 like ordinary docker: registries can.
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://busybox", "dir:"+input2Dir)
|
||||
assertSkopeoSucceeds(c, "", "copy", testFQIN, "dir:"+input2Dir)
|
||||
verifyManifestMIMEType(c, input2Dir, manifest.DockerV2Schema2MediaType)
|
||||
// 2→2 (the "f2t2" in tag means "from 2 to 2")
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "dir:"+input2Dir, schema2Registry+":f2t2")
|
||||
|
||||
@@ -2,6 +2,7 @@ package main

import (
"bufio"
"context"
"encoding/base64"
"fmt"
"io/ioutil"
@@ -9,6 +10,7 @@ import (
"os/exec"
"path/filepath"
"strings"
"time"

"github.com/docker/docker/pkg/homedir"
"github.com/go-check/check"
@@ -62,6 +64,7 @@ func (cluster *openshiftCluster) startMaster(c *check.C) {
cmd := cluster.clusterCmd(nil, "openshift", "start", "master")
cluster.processes = append(cluster.processes, cmd)
stdout, err := cmd.StdoutPipe()
c.Assert(err, check.IsNil)
// Send both to the same pipe. This might cause the two streams to be mixed up,
// but logging actually goes only to stderr - this primarily ensure we log any
// unexpected output to stdout.
@@ -108,6 +111,8 @@ func (cluster *openshiftCluster) startMaster(c *check.C) {

gotPortCheck := false
gotLogCheck := false
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
for !gotPortCheck || !gotLogCheck {
c.Logf("Waiting for master")
select {
@@ -120,6 +125,8 @@ func (cluster *openshiftCluster) startMaster(c *check.C) {
c.Fatal("log check done, success message not found")
}
gotLogCheck = true
case <-ctx.Done():
c.Fatalf("Timed out waiting for master: %v", ctx.Err())
}
}
c.Logf("OK, master started!")
@@ -165,8 +172,14 @@ func (cluster *openshiftCluster) startRegistryProcess(c *check.C, port int, conf
terminatePortCheck <- true
}()
c.Logf("Waiting for registry to start")
<-portOpen
c.Logf("OK, Registry port open")
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
select {
case <-portOpen:
c.Logf("OK, Registry port open")
case <-ctx.Done():
c.Fatalf("Timed out waiting for registry to start: %v", ctx.Err())
}

return cmd
}

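The hunks above replace the test harness's unbounded waits with context-based timeouts around the existing channel reads. As a standalone illustration of that pattern only (not part of the diff; the channel, durations, and function name here are made up), a minimal sketch:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// waitWithTimeout blocks until ready is signalled or the deadline passes.
// It mirrors the select-on-ctx.Done() shape the harness now uses.
func waitWithTimeout(ready <-chan struct{}, d time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), d)
	defer cancel() // always release the timer

	select {
	case <-ready:
		return nil // the awaited event happened in time
	case <-ctx.Done():
		return fmt.Errorf("timed out waiting: %w", ctx.Err())
	}
}

func main() {
	ready := make(chan struct{})
	go func() {
		time.Sleep(100 * time.Millisecond)
		close(ready)
	}()
	fmt.Println(waitWithTimeout(ready, time.Second)) // prints <nil>
}
```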
@@ -20,7 +20,7 @@ to start a container, then within the container:

An example of what can be done within the container:
cd ..; make bin/skopeo PREFIX=/usr install
./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://busybox:latest atomic:localhost:5000/myns/personal:personal
./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://quay.io/libpod/busybox:latest atomic:localhost:5000/myns/personal:personal
oc get istag personal:personal -o json
curl -L -v 'http://localhost:5000/v2/'
cat ~/.docker/config.json

@@ -635,7 +635,7 @@ func (s *SyncSuite) TestFailsWithDockerSourceNotExisting(c *check.C) {
"sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", repo, tmpDir)

//tagged
assertSkopeoFails(c, ".*Error reading manifest.*",
assertSkopeoFails(c, ".*reading manifest.*",
"sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", repo+":thetag", tmpDir)
}

@@ -17,6 +17,10 @@ import (
const skopeoBinary = "skopeo"
const decompressDirsBinary = "./decompress-dirs.sh"

const testFQIN = "docker://quay.io/libpod/busybox" // tag left off on purpose, some tests need to add a special one
const testFQIN64 = "docker://quay.io/libpod/busybox:amd64"
const testFQINMultiLayer = "docker://quay.io/libpod/alpine_nginx:master" // multi-layer

// consumeAndLogOutputStream takes (f, err) from an exec.*Pipe(), and causes all output to it to be logged to c.
func consumeAndLogOutputStream(c *check.C, id string, f io.ReadCloser, err error) {
c.Assert(err, check.IsNil)

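The testFQIN* constants above replace the hard-coded docker://busybox references seen in the earlier test hunks. Per the comment on testFQIN, the bare reference carries no tag and each test appends whatever tag it needs; a trivial, self-contained sketch of that composition (the ":latest" tag here is only an example, not a value mandated by the suite):

```go
package main

import "fmt"

const testFQIN = "docker://quay.io/libpod/busybox" // tag left off on purpose

func main() {
	// A test appends its own tag to the bare fully-qualified image name.
	fmt.Println(fmt.Sprintf("%s:latest", testFQIN))
	// Output: docker://quay.io/libpod/busybox:latest
}
```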
@@ -25,6 +25,23 @@ let
|
||||
-i "$dev"/include/glib-2.0/gobject/gobjectnotifyqueue.c
|
||||
'';
|
||||
});
|
||||
pcsclite = (static pkg.pcsclite).overrideAttrs (x: {
|
||||
configureFlags = [
|
||||
"--enable-confdir=/etc"
|
||||
"--enable-usbdropdir=/var/lib/pcsc/drivers"
|
||||
"--disable-libsystemd"
|
||||
"--disable-libudev"
|
||||
"--disable-libusb"
|
||||
];
|
||||
buildInputs = [ pkgs.python3 pkgs.dbus ];
|
||||
});
|
||||
systemd = (static pkg.systemd).overrideAttrs (x: {
|
||||
outputs = [ "out" "dev" ];
|
||||
mesonFlags = x.mesonFlags ++ [
|
||||
"-Dglib=false"
|
||||
"-Dstatic-libsystemd=true"
|
||||
];
|
||||
});
|
||||
};
|
||||
};
|
||||
});
|
||||
@@ -47,13 +64,14 @@ let
|
||||
doCheck = false;
|
||||
enableParallelBuilding = true;
|
||||
outputs = [ "out" ];
|
||||
nativeBuildInputs = [ bash gitMinimal go-md2man installShellFiles makeWrapper pkg-config which ];
|
||||
buildInputs = [ glibc glibc.static gpgme libassuan libgpgerror libseccomp ];
|
||||
nativeBuildInputs = [ bash gitMinimal go-md2man pkg-config which ];
|
||||
buildInputs = [ glibc glibc.static glib gpgme libassuan libgpgerror libseccomp ];
|
||||
prePatch = ''
|
||||
export CFLAGS='-static -pthread'
|
||||
export LDFLAGS='-s -w -static-libgcc -static'
|
||||
export EXTRA_LDFLAGS='-s -w -linkmode external -extldflags "-static -lm"'
|
||||
export BUILDTAGS='static netgo osusergo exclude_graphdriver_btrfs exclude_graphdriver_devicemapper'
|
||||
export CGO_ENABLED=1
|
||||
'';
|
||||
buildPhase = ''
|
||||
patchShebangs .
|
||||
|
||||
@@ -23,6 +23,23 @@ let
|
||||
-i "$dev"/include/glib-2.0/gobject/gobjectnotifyqueue.c
|
||||
'';
|
||||
});
|
||||
pcsclite = (static pkg.pcsclite).overrideAttrs (x: {
|
||||
configureFlags = [
|
||||
"--enable-confdir=/etc"
|
||||
"--enable-usbdropdir=/var/lib/pcsc/drivers"
|
||||
"--disable-libsystemd"
|
||||
"--disable-libudev"
|
||||
"--disable-libusb"
|
||||
];
|
||||
buildInputs = [ pkgs.python3 pkgs.dbus ];
|
||||
});
|
||||
systemd = (static pkg.systemd).overrideAttrs (x: {
|
||||
outputs = [ "out" "dev" ];
|
||||
mesonFlags = x.mesonFlags ++ [
|
||||
"-Dglib=false"
|
||||
"-Dstatic-libsystemd=true"
|
||||
];
|
||||
});
|
||||
};
|
||||
};
|
||||
});
|
||||
@@ -45,13 +62,14 @@ let
|
||||
doCheck = false;
|
||||
enableParallelBuilding = true;
|
||||
outputs = [ "out" ];
|
||||
nativeBuildInputs = [ bash gitMinimal go-md2man installShellFiles makeWrapper pkg-config which ];
|
||||
buildInputs = [ glibc glibc.static gpgme libassuan libgpgerror libseccomp ];
|
||||
nativeBuildInputs = [ bash gitMinimal go-md2man pkg-config which ];
|
||||
buildInputs = [ glibc glibc.static glib gpgme libassuan libgpgerror libseccomp ];
|
||||
prePatch = ''
|
||||
export CFLAGS='-static -pthread'
|
||||
export LDFLAGS='-s -w -static-libgcc -static'
|
||||
export EXTRA_LDFLAGS='-s -w -linkmode external -extldflags "-static -lm"'
|
||||
export BUILDTAGS='static netgo osusergo exclude_graphdriver_btrfs exclude_graphdriver_devicemapper'
|
||||
export CGO_ENABLED=1
|
||||
'';
|
||||
buildPhase = ''
|
||||
patchShebangs .
|
||||
|
||||
@@ -1,9 +1,9 @@
{
"url": "https://github.com/nixos/nixpkgs",
"rev": "eb7e1ef185f6c990cda5f71fdc4fb02e76ab06d5",
"date": "2021-05-05T23:16:00+02:00",
"path": "/nix/store/a98lkhjlsqh32ic2kkrv5kkik6jy25wh-nixpkgs",
"sha256": "1ibz204c41g7baqga2iaj11yz9l75cfdylkiqjnk5igm81ivivxg",
"rev": "2a96414d7e350160a33ed0978449c9ff5b5a6eb3",
"date": "2021-07-13T18:21:47+02:00",
"path": "/nix/store/2ai9q8ac6vxb2rrngdz82y8jxnk15cvm-nixpkgs",
"sha256": "1dzrfqdjq3yq5jjskiqflzy58l2xx6059gay9p1k07zrlm1wigy5",
"fetchSubmodules": false,
"deepClone": false,
"leaveDotGit": false

@@ -5,4 +5,5 @@ let
url = "${json.url}/archive/${json.rev}.tar.gz";
inherit (json) sha256;
});
in nixpkgs
in
nixpkgs

@@ -1,51 +0,0 @@
|
||||
## This Makefile is used to publish skopeo container images with Travis CI ##
|
||||
## Environment variables, used in this Makefile are specified in .travis.yml
|
||||
|
||||
export DOCKER_CLI_EXPERIMENTAL=enabled
|
||||
GOARCH ?= $(shell go env GOARCH)
|
||||
|
||||
# Dereference variable $(1), return value if non-empty, otherwise raise an error.
|
||||
err_if_empty = $(if $(strip $($(1))),$(strip $($(1))),$(error Required $(1) variable is undefined or empty))
|
||||
|
||||
# Requires two arguments: Names of the username and the password env. vars.
|
||||
define quay_login
|
||||
@echo "$(call err_if_empty,$(2))" | \
|
||||
docker login quay.io -u "$(call err_if_empty,$(1))" --password-stdin
|
||||
endef
|
||||
|
||||
# Build container image of skopeo upstream based on host architecture
|
||||
build-image/upstream:
|
||||
docker build -t "${UPSTREAM_IMAGE}-${GOARCH}" contrib/skopeoimage/upstream
|
||||
|
||||
# Build container image of skopeo stable based on host architecture
|
||||
build-image/stable:
|
||||
docker build -t "${STABLE_IMAGE}-${GOARCH}" contrib/skopeoimage/stable
|
||||
|
||||
# Push container image of skopeo upstream (based on host architecture) to image repository
|
||||
push-image/upstream:
|
||||
$(call quay_login,SKOPEO_QUAY_USERNAME,SKOPEO_QUAY_PASSWORD)
|
||||
docker push "${UPSTREAM_IMAGE}-${GOARCH}"
|
||||
|
||||
# Push container image of skopeo stable (based on host architecture) to image default and extra repositories
|
||||
push-image/stable:
|
||||
$(call quay_login,SKOPEO_QUAY_USERNAME,SKOPEO_QUAY_PASSWORD)
|
||||
docker push "${STABLE_IMAGE}-${GOARCH}"
|
||||
docker tag "${STABLE_IMAGE}-${GOARCH}" "${EXTRA_STABLE_IMAGE}-${GOARCH}"
|
||||
$(call quay_login,CONTAINERS_QUAY_USERNAME,CONTAINERS_QUAY_PASSWORD)
|
||||
docker push "${EXTRA_STABLE_IMAGE}-${GOARCH}"
|
||||
|
||||
# Create and push multiarch image manifest of skopeo upstream
|
||||
push-manifest-multiarch/upstream:
|
||||
docker manifest create "${UPSTREAM_IMAGE}" $(foreach arch,${MULTIARCH_MANIFEST_ARCHITECTURES}, ${UPSTREAM_IMAGE}-${arch})
|
||||
$(call quay_login,SKOPEO_QUAY_USERNAME,SKOPEO_QUAY_PASSWORD)
|
||||
docker manifest push --purge "${UPSTREAM_IMAGE}"
|
||||
|
||||
# Create and push multiarch image manifest of skopeo stable
|
||||
push-manifest-multiarch/stable:
|
||||
docker manifest create "${STABLE_IMAGE}" $(foreach arch,${MULTIARCH_MANIFEST_ARCHITECTURES}, ${STABLE_IMAGE}-${arch})
|
||||
$(call quay_login,SKOPEO_QUAY_USERNAME,SKOPEO_QUAY_PASSWORD)
|
||||
docker manifest push --purge "${STABLE_IMAGE}"
|
||||
# Push to extra repository
|
||||
docker manifest create "${EXTRA_STABLE_IMAGE}" $(foreach arch,${MULTIARCH_MANIFEST_ARCHITECTURES}, ${EXTRA_STABLE_IMAGE}-${arch})
|
||||
$(call quay_login,CONTAINERS_QUAY_USERNAME,CONTAINERS_QUAY_PASSWORD)
|
||||
docker manifest push --purge "${EXTRA_STABLE_IMAGE}"
|
||||
@@ -1,40 +0,0 @@
|
||||
# skopeo container image build with Travis
|
||||
|
||||
This document describes the details and requirements to build and publish skopeo container images. The images are published as several architecture specific images and multiarch images on top for upstream and stable versions.
|
||||
|
||||
The Travis configuration is available at `.travis.yml`.
|
||||
|
||||
The code to build and publish images is available at `release/Makefile` and should be used only via Travis.
|
||||
|
||||
Travis workflow has 3 major pieces:
|
||||
- `local-build` - build and test source code locally on osx and linux/amd64 environments, 2 jobs are running in parallel
|
||||
- `image-build-push` - build and push container images with several Travis jobs running in parallel to build images for several architectures (linux/amd64, linux/s390x, linux/ppc64le). Build part is done for each PR, push part is executed only in case of cron job or master branch update.
|
||||
- `manifest-multiarch-push` - create and push image manifests, which consists of architecture specific images from previous step. Executed only in case of cron job or master branch update.
|
||||
|
||||
## Ways to have full workflow run
|
||||
- [cron job](https://docs.travis-ci.com/user/cron-jobs/#adding-cron-jobs)
|
||||
- Trigger build from Travis CI
|
||||
- Update code in master branch
|
||||
|
||||
## Environment variables
|
||||
|
||||
Several environment variables are used to customize image names and keep private credentials to push to quay.io repositories.
|
||||
|
||||
**Image tags** are specified in environment variable and should be manually updated in case of new release.
|
||||
|
||||
- `SKOPEO_QUAY_USERNAME` and `SKOPEO_QUAY_PASSWORD` are credentials to push images to `quay.io/skopeo/stable` and `quay.io/skopeo/upstream` repos, and require the credentials to have write permissions. These variables should be specified in [Travis](https://docs.travis-ci.com/user/environment-variables/#defining-variables-in-repository-settings).
|
||||
- `CONTAINERS_QUAY_USERNAME` and `CONTAINERS_QUAY_PASSWORD` are credentials to push images to `quay.io/containers/skopeo` repos, and require the credentials to have write permissions. These variables should be specified in [Travis](https://docs.travis-ci.com/user/environment-variables/#defining-variables-in-repository-settings).
|
||||
|
||||
Variables in .travis.yml
|
||||
- `MULTIARCH_MANIFEST_ARCHITECTURES` is a list with architecture shortnames, to appear in final multiarch manifest. The values should fit to architectures used in the `image-build-push` Travis step.
|
||||
- `STABLE_IMAGE`, `EXTRA_STABLE_IMAGE` are image names to publish stable Skopeo.
|
||||
- `UPSTREAM_IMAGE` is an image name to publish upstream Skopeo.
|
||||
|
||||
### Values for environment variables
|
||||
|
||||
| Env variable | Value |
|
||||
| -------------------------------- |----------------------------------|
|
||||
| MULTIARCH_MANIFEST_ARCHITECTURES | "amd64 s390x ppc64le" |
|
||||
| STABLE_IMAGE | quay.io/skopeo/stable:v1.2.0 |
|
||||
| EXTRA_STABLE_IMAGE | quay.io/containers/skopeo:v1.2.0 |
|
||||
| UPSTREAM_IMAGE | quay.io/skopeo/upstream:master |
|
||||
@@ -93,7 +93,7 @@ END_EXPECT

# By default, 'inspect' tries to match our host os+arch. This should fail.
run_skopeo 1 inspect $img
expect_output --substring "Error parsing manifest for image: Error choosing image instance: no image found in manifest list for architecture $arch, variant " \
expect_output --substring "parsing manifest for image: choosing image instance: no image found in manifest list for architecture $arch, variant " \
"skopeo inspect, without --raw, fails"

# With --raw, we can inspect

vendor/github.com/Microsoft/go-winio/README.md (2 changes, generated, vendored)
@@ -1,4 +1,4 @@
|
||||
# go-winio
|
||||
# go-winio [](https://github.com/microsoft/go-winio/actions/workflows/ci.yml)
|
||||
|
||||
This repository contains utilities for efficiently performing Win32 IO operations in
|
||||
Go. Currently, this is focused on accessing named pipes and other file handles, and
|
||||
|
||||
vendor/github.com/Microsoft/go-winio/privilege.go (5 changes, generated, vendored)
@@ -28,8 +28,9 @@ const (
|
||||
|
||||
ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
|
||||
|
||||
SeBackupPrivilege = "SeBackupPrivilege"
|
||||
SeRestorePrivilege = "SeRestorePrivilege"
|
||||
SeBackupPrivilege = "SeBackupPrivilege"
|
||||
SeRestorePrivilege = "SeRestorePrivilege"
|
||||
SeSecurityPrivilege = "SeSecurityPrivilege"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
vendor/github.com/Microsoft/hcsshim/errors.go (6 changes, generated, vendored)
@@ -59,7 +59,7 @@ var (
|
||||
// ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
|
||||
ErrVmcomputeOperationInvalidState = hcs.ErrVmcomputeOperationInvalidState
|
||||
|
||||
// ErrProcNotFound is an error encountered when the the process cannot be found
|
||||
// ErrProcNotFound is an error encountered when a procedure look up fails.
|
||||
ErrProcNotFound = hcs.ErrProcNotFound
|
||||
|
||||
// ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2
|
||||
@@ -159,7 +159,7 @@ func (e *ProcessError) Error() string {
|
||||
// IsNotExist checks if an error is caused by the Container or Process not existing.
|
||||
// Note: Currently, ErrElementNotFound can mean that a Process has either
|
||||
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
|
||||
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
|
||||
// will currently return true when the error is ErrElementNotFound.
|
||||
func IsNotExist(err error) bool {
|
||||
if _, ok := err.(EndpointNotFoundError); ok {
|
||||
return true
|
||||
@@ -192,7 +192,7 @@ func IsTimeout(err error) bool {
|
||||
// a Container or Process being already stopped.
|
||||
// Note: Currently, ErrElementNotFound can mean that a Process has either
|
||||
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
|
||||
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
|
||||
// will currently return true when the error is ErrElementNotFound.
|
||||
func IsAlreadyStopped(err error) bool {
|
||||
return hcs.IsAlreadyStopped(getInnerError(err))
|
||||
}
|
||||
|
||||
vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go (12 changes, generated, vendored)
@@ -60,7 +60,7 @@ var (
|
||||
// ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
|
||||
ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105)
|
||||
|
||||
// ErrProcNotFound is an error encountered when the the process cannot be found
|
||||
// ErrProcNotFound is an error encountered when a procedure look up fails.
|
||||
ErrProcNotFound = syscall.Errno(0x7f)
|
||||
|
||||
// ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2
|
||||
@@ -242,12 +242,11 @@ func makeProcessError(process *Process, op string, err error, events []ErrorEven
|
||||
// IsNotExist checks if an error is caused by the Container or Process not existing.
|
||||
// Note: Currently, ErrElementNotFound can mean that a Process has either
|
||||
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
|
||||
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
|
||||
// will currently return true when the error is ErrElementNotFound.
|
||||
func IsNotExist(err error) bool {
|
||||
err = getInnerError(err)
|
||||
return err == ErrComputeSystemDoesNotExist ||
|
||||
err == ErrElementNotFound ||
|
||||
err == ErrProcNotFound
|
||||
err == ErrElementNotFound
|
||||
}
|
||||
|
||||
// IsAlreadyClosed checks if an error is caused by the Container or Process having been
|
||||
@@ -278,12 +277,11 @@ func IsTimeout(err error) bool {
|
||||
// a Container or Process being already stopped.
|
||||
// Note: Currently, ErrElementNotFound can mean that a Process has either
|
||||
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
|
||||
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
|
||||
// will currently return true when the error is ErrElementNotFound.
|
||||
func IsAlreadyStopped(err error) bool {
|
||||
err = getInnerError(err)
|
||||
return err == ErrVmcomputeAlreadyStopped ||
|
||||
err == ErrElementNotFound ||
|
||||
err == ErrProcNotFound
|
||||
err == ErrElementNotFound
|
||||
}
|
||||
|
||||
// IsNotSupported returns a boolean indicating whether the error is caused by
|
||||
|
||||
vendor/github.com/VividCortex/ewma/.gitignore (1 change, generated, vendored)
@@ -1,2 +1,3 @@
|
||||
.DS_Store
|
||||
.*.sw?
|
||||
/coverage.txt
|
||||
vendor/github.com/VividCortex/ewma/.whitesource (3 changes, generated, vendored, new file)
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"settingsInheritedFrom": "VividCortex/whitesource-config@master"
|
||||
}
|
||||
vendor/github.com/VividCortex/ewma/README.md (29 changes, generated, vendored)
@@ -1,4 +1,8 @@
|
||||
# EWMA [](https://godoc.org/github.com/VividCortex/ewma) 
|
||||
# EWMA
|
||||
|
||||
[](https://godoc.org/github.com/VividCortex/ewma)
|
||||

|
||||
[](https://codecov.io/gh/VividCortex/ewma)
|
||||
|
||||
This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our
|
||||
Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/).
|
||||
@@ -103,23 +107,24 @@ View the GoDoc generated documentation [here](http://godoc.org/github.com/VividC
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import "github.com/VividCortex/ewma"
|
||||
|
||||
func main() {
|
||||
samples := [100]float64{
|
||||
4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149,
|
||||
}
|
||||
samples := [100]float64{
|
||||
4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149,
|
||||
}
|
||||
|
||||
e := ewma.NewMovingAverage() //=> Returns a SimpleEWMA if called without params
|
||||
a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1)
|
||||
e := ewma.NewMovingAverage() //=> Returns a SimpleEWMA if called without params
|
||||
a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1)
|
||||
|
||||
for _, f := range samples {
|
||||
e.Add(f)
|
||||
a.Add(f)
|
||||
}
|
||||
for _, f := range samples {
|
||||
e.Add(f)
|
||||
a.Add(f)
|
||||
}
|
||||
|
||||
e.Value() //=> 13.577404704631077
|
||||
a.Value() //=> 1.5806140565521463e-12
|
||||
e.Value() //=> 13.577404704631077
|
||||
a.Value() //=> 1.5806140565521463e-12
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
vendor/github.com/VividCortex/ewma/codecov.yml (6 changes, generated, vendored, new file)
@@ -0,0 +1,6 @@
|
||||
coverage:
|
||||
status:
|
||||
project:
|
||||
default:
|
||||
threshold: 15%
|
||||
patch: off
|
||||
vendor/github.com/VividCortex/ewma/go.mod (3 changes, generated, vendored, new file)
@@ -0,0 +1,3 @@
|
||||
module github.com/VividCortex/ewma
|
||||
|
||||
go 1.12
|
||||
@@ -2,10 +2,9 @@
|
||||
|
||||
*Go language library to map between non-negative integers and boolean values*
|
||||
|
||||
[](https://github.com/willf/bitset/actions?query=workflow%3ATest)
|
||||
[](https://coveralls.io/github/willf/bitset?branch=master)
|
||||
[](https://github.com/willf/bitset/actions?query=workflow%3ATest)
|
||||
[](https://goreportcard.com/report/github.com/willf/bitset)
|
||||
[](https://pkg.go.dev/github.com/willf/bitset?tab=doc)
|
||||
[](https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc)
|
||||
|
||||
|
||||
## Description
|
||||
@@ -30,7 +29,7 @@ import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
|
||||
"github.com/willf/bitset"
|
||||
"github.com/bits-and-blooms/bitset"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -63,7 +62,7 @@ func main() {
|
||||
|
||||
As an alternative to BitSets, one should check out the 'big' package, which provides a (less set-theoretical) view of bitsets.
|
||||
|
||||
Package documentation is at: https://pkg.go.dev/github.com/willf/bitset?tab=doc
|
||||
Package documentation is at: https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc
|
||||
|
||||
## Memory Usage
|
||||
|
||||
@@ -78,7 +77,7 @@ It is possible that a later version will match the `math/bits` return signature
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
go get github.com/willf/bitset
|
||||
go get github.com/bits-and-blooms/bitset
|
||||
```
|
||||
|
||||
## Contributing
|
||||
@@ -209,6 +209,27 @@ func (b *BitSet) Flip(i uint) *BitSet {
return b
}

// FlipRange bit in [start, end).
// If end>= Cap(), this function will panic.
// Warning: using a very large value for 'end'
// may lead to a memory shortage and a panic: the caller is responsible
// for providing sensible parameters in line with their memory capacity.
func (b *BitSet) FlipRange(start, end uint) *BitSet {
if start >= end {
return b
}

b.extendSetMaybe(end - 1)
var startWord uint = start >> log2WordSize
var endWord uint = end >> log2WordSize
b.set[startWord] ^= ^(^uint64(0) << (start & (wordSize - 1)))
for i := startWord; i < endWord; i++ {
b.set[i] = ^b.set[i]
}
b.set[endWord] ^= ^uint64(0) >> (-end & (wordSize - 1))
return b
}

// Shrink shrinks BitSet so that the provided value is the last possible
// set value. It clears all bits > the provided index and reduces the size
// and length of the set.
@@ -519,7 +540,7 @@ func (b *BitSet) Copy(c *BitSet) (count uint) {
}

// Count (number of set bits).
// Also known as "popcount" or "popularity count".
// Also known as "popcount" or "population count".
func (b *BitSet) Count() uint {
if b != nil && b.set != nil {
return uint(popcntSlice(b.set))
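The vendored bitset update above adds FlipRange, which inverts every bit in [start, end), and moves the module to the bits-and-blooms import path. A minimal usage sketch against that renamed module (illustrative only; the bit positions are made-up values, not taken from skopeo):

```go
package main

import (
	"fmt"

	"github.com/bits-and-blooms/bitset"
)

func main() {
	b := bitset.New(16) // all bits start cleared
	b.FlipRange(2, 5)   // bits 2, 3 and 4 are now set; bit 5 is untouched

	fmt.Println(b.Test(3), b.Test(5)) // true false
	fmt.Println(b.Count())            // 3
}
```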
vendor/github.com/bits-and-blooms/bitset/go.mod (3 changes, generated, vendored, new file)
@@ -0,0 +1,3 @@
|
||||
module github.com/bits-and-blooms/bitset
|
||||
|
||||
go 1.14
|
||||
vendor/github.com/containers/common/pkg/auth/auth.go (157 changes, generated, vendored)
@@ -9,6 +9,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/v5/docker"
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/pkg/docker/config"
|
||||
"github.com/containers/image/v5/pkg/sysregistriesv2"
|
||||
"github.com/containers/image/v5/types"
|
||||
@@ -69,30 +70,50 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
|
||||
systemContext = systemContextWithOptions(systemContext, opts.AuthFile, opts.CertDir)
|
||||
|
||||
var (
|
||||
server string
|
||||
err error
|
||||
authConfig types.DockerAuthConfig
|
||||
key, registry string
|
||||
ref reference.Named
|
||||
err error
|
||||
)
|
||||
if len(args) > 1 {
|
||||
return errors.New("login accepts only one registry to login to")
|
||||
}
|
||||
if len(args) == 0 {
|
||||
l := len(args)
|
||||
switch l {
|
||||
case 0:
|
||||
if !opts.AcceptUnspecifiedRegistry {
|
||||
return errors.New("please provide a registry to login to")
|
||||
}
|
||||
if server, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
|
||||
if key, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", server)
|
||||
registry = key
|
||||
logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", key)
|
||||
|
||||
case 1:
|
||||
key, registry, ref, err = parseRegistryArgument(args[0], opts.AcceptRepositories)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
default:
|
||||
return errors.New("login accepts only one registry to login to")
|
||||
|
||||
}
|
||||
|
||||
if ref != nil {
|
||||
authConfig, err = config.GetCredentialsForRef(systemContext, ref)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get credentials for repository")
|
||||
}
|
||||
} else {
|
||||
server = getRegistryName(args[0])
|
||||
}
|
||||
authConfig, err := config.GetCredentials(systemContext, server)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "reading auth file")
|
||||
// nolint: staticcheck
|
||||
authConfig, err = config.GetCredentials(systemContext, registry)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get credentials")
|
||||
}
|
||||
}
|
||||
|
||||
if opts.GetLoginSet {
|
||||
if authConfig.Username == "" {
|
||||
return errors.Errorf("not logged into %s", server)
|
||||
return errors.Errorf("not logged into %s", key)
|
||||
}
|
||||
fmt.Fprintf(opts.Stdout, "%s\n", authConfig.Username)
|
||||
return nil
|
||||
@@ -119,9 +140,9 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
|
||||
|
||||
// If no username and no password is specified, try to use existing ones.
|
||||
if opts.Username == "" && password == "" && authConfig.Username != "" && authConfig.Password != "" {
|
||||
fmt.Println("Authenticating with existing credentials...")
|
||||
if err := docker.CheckAuth(ctx, systemContext, authConfig.Username, authConfig.Password, server); err == nil {
|
||||
fmt.Fprintln(opts.Stdout, "Existing credentials are valid. Already logged in to", server)
|
||||
fmt.Fprintf(opts.Stdout, "Authenticating with existing credentials for %s\n", key)
|
||||
if err := docker.CheckAuth(ctx, systemContext, authConfig.Username, authConfig.Password, registry); err == nil {
|
||||
fmt.Fprintf(opts.Stdout, "Existing credentials are valid. Already logged in to %s\n", registry)
|
||||
return nil
|
||||
}
|
||||
fmt.Fprintln(opts.Stdout, "Existing credentials are invalid, please enter valid username and password")
|
||||
@@ -132,21 +153,60 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
|
||||
return errors.Wrap(err, "getting username and password")
|
||||
}
|
||||
|
||||
if err = docker.CheckAuth(ctx, systemContext, username, password, server); err == nil {
|
||||
if err = docker.CheckAuth(ctx, systemContext, username, password, registry); err == nil {
|
||||
// Write the new credentials to the authfile
|
||||
if err := config.SetAuthentication(systemContext, server, username, password); err != nil {
|
||||
desc, err := config.SetCredentials(systemContext, key, username, password)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if opts.Verbose {
|
||||
fmt.Fprintln(opts.Stdout, "Used: ", desc)
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
fmt.Fprintln(opts.Stdout, "Login Succeeded!")
|
||||
return nil
|
||||
}
|
||||
if unauthorized, ok := err.(docker.ErrUnauthorizedForCredentials); ok {
|
||||
logrus.Debugf("error logging into %q: %v", server, unauthorized)
|
||||
return errors.Errorf("error logging into %q: invalid username/password", server)
|
||||
logrus.Debugf("error logging into %q: %v", key, unauthorized)
|
||||
return errors.Errorf("error logging into %q: invalid username/password", key)
|
||||
}
|
||||
return errors.Wrapf(err, "authenticating creds for %q", server)
|
||||
return errors.Wrapf(err, "authenticating creds for %q", key)
|
||||
}
|
||||
|
||||
// parseRegistryArgument verifies the provided arg depending if we accept
|
||||
// repositories or not.
|
||||
func parseRegistryArgument(arg string, acceptRepositories bool) (key, registry string, maybeRef reference.Named, err error) {
|
||||
if !acceptRepositories {
|
||||
registry = getRegistryName(arg)
|
||||
key = registry
|
||||
return key, registry, maybeRef, nil
|
||||
}
|
||||
|
||||
key = trimScheme(arg)
|
||||
if key != arg {
|
||||
return key, registry, nil, errors.New("credentials key has https[s]:// prefix")
|
||||
}
|
||||
|
||||
registry = getRegistryName(key)
|
||||
if registry == key {
|
||||
// We cannot parse a reference from a registry, so we stop here
|
||||
return key, registry, nil, nil
|
||||
}
|
||||
|
||||
ref, parseErr := reference.ParseNamed(key)
|
||||
if parseErr != nil {
|
||||
return key, registry, nil, errors.Wrapf(parseErr, "parse reference from %q", key)
|
||||
}
|
||||
|
||||
if !reference.IsNameOnly(ref) {
|
||||
return key, registry, nil, errors.Errorf("reference %q contains tag or digest", ref.String())
|
||||
}
|
||||
|
||||
maybeRef = ref
|
||||
registry = reference.Domain(ref)
|
||||
|
||||
return key, registry, maybeRef, nil
|
||||
}
|
||||
|
||||
// getRegistryName scrubs and parses the input to get the server name
|
||||
@@ -154,13 +214,21 @@ func getRegistryName(server string) string {
|
||||
// removes 'http://' or 'https://' from the front of the
|
||||
// server/registry string if either is there. This will be mostly used
|
||||
// for user input from 'Buildah login' and 'Buildah logout'.
|
||||
server = strings.TrimPrefix(strings.TrimPrefix(server, "https://"), "http://")
|
||||
server = trimScheme(server)
|
||||
// gets the registry from the input. If the input is of the form
|
||||
// quay.io/myuser/myimage, it will parse it and just return quay.io
|
||||
split := strings.Split(server, "/")
|
||||
return split[0]
|
||||
}
|
||||
|
||||
// trimScheme removes the HTTP(s) scheme from the provided repository.
|
||||
func trimScheme(repository string) string {
|
||||
// removes 'http://' or 'https://' from the front of the
|
||||
// server/registry string if either is there. This will be mostly used
|
||||
// for user input from 'Buildah login' and 'Buildah logout'.
|
||||
return strings.TrimPrefix(strings.TrimPrefix(repository, "https://"), "http://")
|
||||
}
|
||||
|
||||
// getUserAndPass gets the username and password from STDIN if not given
|
||||
// using the -u and -p flags. If the username prompt is left empty, the
|
||||
// displayed userFromAuthFile will be used instead.
|
||||
@@ -205,8 +273,9 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
|
||||
systemContext = systemContextWithOptions(systemContext, opts.AuthFile, "")
|
||||
|
||||
var (
|
||||
server string
|
||||
err error
|
||||
key, registry string
|
||||
ref reference.Named
|
||||
err error
|
||||
)
|
||||
if len(args) > 1 {
|
||||
return errors.New("logout accepts only one registry to logout from")
|
||||
@@ -215,16 +284,20 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
|
||||
if !opts.AcceptUnspecifiedRegistry {
|
||||
return errors.New("please provide a registry to logout from")
|
||||
}
|
||||
if server, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
|
||||
if key, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", server)
|
||||
registry = key
|
||||
logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", key)
|
||||
}
|
||||
if len(args) != 0 {
|
||||
if opts.All {
|
||||
return errors.New("--all takes no arguments")
|
||||
}
|
||||
server = getRegistryName(args[0])
|
||||
key, registry, ref, err = parseRegistryArgument(args[0], opts.AcceptRepositories)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if opts.All {
|
||||
@@ -235,24 +308,34 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
|
||||
return nil
|
||||
}
|
||||
|
||||
err = config.RemoveAuthentication(systemContext, server)
|
||||
err = config.RemoveAuthentication(systemContext, key)
|
||||
switch errors.Cause(err) {
|
||||
case nil:
|
||||
fmt.Fprintf(opts.Stdout, "Removed login credentials for %s\n", server)
|
||||
fmt.Fprintf(opts.Stdout, "Removed login credentials for %s\n", key)
|
||||
return nil
|
||||
case config.ErrNotLoggedIn:
|
||||
authConfig, err := config.GetCredentials(systemContext, server)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "reading auth file")
|
||||
var authConfig types.DockerAuthConfig
|
||||
if ref != nil {
|
||||
authConfig, err = config.GetCredentialsForRef(systemContext, ref)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get credentials for repository")
|
||||
}
|
||||
} else {
|
||||
// nolint: staticcheck
|
||||
authConfig, err = config.GetCredentials(systemContext, registry)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get credentials")
|
||||
}
|
||||
}
|
||||
authInvalid := docker.CheckAuth(context.Background(), systemContext, authConfig.Username, authConfig.Password, server)
|
||||
|
||||
authInvalid := docker.CheckAuth(context.Background(), systemContext, authConfig.Username, authConfig.Password, registry)
|
||||
if authConfig.Username != "" && authConfig.Password != "" && authInvalid == nil {
|
||||
fmt.Printf("Not logged into %s with current tool. Existing credentials were established via docker login. Please use docker logout instead.\n", server)
|
||||
fmt.Printf("Not logged into %s with current tool. Existing credentials were established via docker login. Please use docker logout instead.\n", key)
|
||||
return nil
|
||||
}
|
||||
return errors.Errorf("Not logged into %s\n", server)
|
||||
return errors.Errorf("Not logged into %s\n", key)
|
||||
default:
|
||||
return errors.Wrapf(err, "logging out of %q", server)
|
||||
return errors.Wrapf(err, "logging out of %q", key)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
vendor/github.com/containers/common/pkg/auth/cli.go (20 changes, generated, vendored)
@@ -14,12 +14,14 @@ type LoginOptions struct {
|
||||
// CLI flags managed by the FlagSet returned by GetLoginFlags
|
||||
// Callers that use GetLoginFlags should not need to touch these values at all; callers that use
|
||||
// other CLI frameworks should set them based on user input.
|
||||
AuthFile string
|
||||
CertDir string
|
||||
Password string
|
||||
Username string
|
||||
StdinPassword bool
|
||||
GetLoginSet bool
|
||||
AuthFile string
|
||||
CertDir string
|
||||
Password string
|
||||
Username string
|
||||
StdinPassword bool
|
||||
GetLoginSet bool
|
||||
Verbose bool // set to true for verbose output
|
||||
AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries
|
||||
// Options caller can set
|
||||
Stdin io.Reader // set to os.Stdin
|
||||
Stdout io.Writer // set to os.Stdout
|
||||
@@ -31,8 +33,9 @@ type LogoutOptions struct {
|
||||
// CLI flags managed by the FlagSet returned by GetLogoutFlags
|
||||
// Callers that use GetLogoutFlags should not need to touch these values at all; callers that use
|
||||
// other CLI frameworks should set them based on user input.
|
||||
AuthFile string
|
||||
All bool
|
||||
AuthFile string
|
||||
All bool
|
||||
AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries
|
||||
// Options caller can set
|
||||
Stdout io.Writer // set to os.Stdout
|
||||
AcceptUnspecifiedRegistry bool // set to true if allows logout with unspecified registry
|
||||
@@ -47,6 +50,7 @@ func GetLoginFlags(flags *LoginOptions) *pflag.FlagSet {
|
||||
fs.StringVarP(&flags.Username, "username", "u", "", "Username for registry")
|
||||
fs.BoolVar(&flags.StdinPassword, "password-stdin", false, "Take the password from stdin")
|
||||
fs.BoolVar(&flags.GetLoginSet, "get-login", false, "Return the current login user for the registry")
|
||||
fs.BoolVarP(&flags.Verbose, "verbose", "v", false, "Write more detailed information to stdout")
|
||||
return &fs
|
||||
}
|
||||
|
||||
|
||||
vendor/github.com/containers/common/pkg/report/template.go (6 changes, generated, vendored)
@@ -130,7 +130,7 @@ func NewTemplate(name string) *Template {
|
||||
func (t *Template) Parse(text string) (*Template, error) {
|
||||
if strings.HasPrefix(text, "table ") {
|
||||
t.isTable = true
|
||||
text = "{{range .}}" + NormalizeFormat(text) + "{{end}}"
|
||||
text = "{{range .}}" + NormalizeFormat(text) + "{{end -}}"
|
||||
} else {
|
||||
text = NormalizeFormat(text)
|
||||
}
|
||||
@@ -157,12 +157,12 @@ func (t *Template) IsTable() bool {
|
||||
return t.isTable
|
||||
}
|
||||
|
||||
var rangeRegex = regexp.MustCompile(`{{\s*range\s*\.\s*}}.*{{\s*end\s*}}`)
|
||||
var rangeRegex = regexp.MustCompile(`{{\s*range\s*\.\s*}}.*{{\s*end\s*-?\s*}}`)
|
||||
|
||||
// EnforceRange ensures that the format string contains a range
|
||||
func EnforceRange(format string) string {
|
||||
if !rangeRegex.MatchString(format) {
|
||||
return "{{range .}}" + format + "{{end}}"
|
||||
return "{{range .}}" + format + "{{end -}}"
|
||||
}
|
||||
return format
|
||||
}
|
||||
|
||||
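The report/template.go hunks above switch the generated table templates from "{{end}}" to "{{end -}}" and relax the range regex to accept the trim marker. For readers unfamiliar with Go's text/template trim syntax, here is a self-contained sketch of what the trailing "-" does; the format strings below are made-up examples, not ones used by the package:

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	data := []string{"a", "b"}

	// Without the trim marker, the newline after {{end}} survives in the output.
	plain := template.Must(template.New("p").Parse("{{range .}}{{.}}\n{{end}}\ndone\n"))
	_ = plain.Execute(os.Stdout, data) // "a\nb\n\ndone\n"

	// "{{end -}}" eats the whitespace that follows the action in the template text.
	trimmed := template.Must(template.New("t").Parse("{{range .}}{{.}}\n{{end -}}\ndone\n"))
	_ = trimmed.Execute(os.Stdout, data) // "a\nb\ndone\n"
}
```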
vendor/github.com/containers/image/v5/copy/copy.go (330 changes, generated, vendored)
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/pkg/blobinfocache"
|
||||
"github.com/containers/image/v5/pkg/compression"
|
||||
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
|
||||
"github.com/containers/image/v5/signature"
|
||||
"github.com/containers/image/v5/transports"
|
||||
"github.com/containers/image/v5/types"
|
||||
@@ -29,10 +30,10 @@ import (
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/vbauerster/mpb/v6"
|
||||
"github.com/vbauerster/mpb/v6/decor"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
"github.com/vbauerster/mpb/v7"
|
||||
"github.com/vbauerster/mpb/v7/decor"
|
||||
"golang.org/x/sync/semaphore"
|
||||
"golang.org/x/term"
|
||||
)
|
||||
|
||||
type digestingReader struct {
|
||||
@@ -43,10 +44,6 @@ type digestingReader struct {
|
||||
validationSucceeded bool
|
||||
}
|
||||
|
||||
// FIXME: disable early layer commits temporarily until a solid solution to
|
||||
// address #1205 has been found.
|
||||
const enableEarlyCommit = false
|
||||
|
||||
var (
|
||||
// ErrDecryptParamsMissing is returned if there is missing decryption parameters
|
||||
ErrDecryptParamsMissing = errors.New("Necessary DecryptParameters not present")
|
||||
@@ -61,7 +58,7 @@ var compressionBufferSize = 1048576
|
||||
|
||||
// expectedCompressionFormats is used to check if a blob with a specified media type is compressed
|
||||
// using the algorithm that the media type says it should be compressed with
|
||||
var expectedCompressionFormats = map[string]*compression.Algorithm{
|
||||
var expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
|
||||
imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip,
|
||||
imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd,
|
||||
manifest.DockerV2Schema2LayerMediaType: &compression.Gzip,
|
||||
@@ -96,7 +93,7 @@ func (d *digestingReader) Read(p []byte) (int, error) {
|
||||
// Coverage: This should not happen, the hash.Hash interface requires
|
||||
// d.digest.Write to never return an error, and the io.Writer interface
|
||||
// requires n2 == len(input) if no error is returned.
|
||||
return 0, errors.Wrapf(err, "Error updating digest during verification: %d vs. %d", n2, n)
|
||||
return 0, errors.Wrapf(err, "updating digest during verification: %d vs. %d", n2, n)
|
||||
}
|
||||
}
|
||||
if err == io.EOF {
|
||||
@@ -121,7 +118,7 @@ type copier struct {
|
||||
progress chan types.ProgressProperties
|
||||
blobInfoCache internalblobinfocache.BlobInfoCache2
|
||||
copyInParallel bool
|
||||
compressionFormat compression.Algorithm
|
||||
compressionFormat compressiontypes.Algorithm
|
||||
compressionLevel *int
|
||||
ociDecryptConfig *encconfig.DecryptConfig
|
||||
ociEncryptConfig *encconfig.EncryptConfig
|
||||
@@ -198,12 +195,15 @@ type Options struct {
|
||||
// OciDecryptConfig contains the config that can be used to decrypt an image if it is
|
||||
// encrypted if non-nil. If nil, it does not attempt to decrypt an image.
|
||||
OciDecryptConfig *encconfig.DecryptConfig
|
||||
|
||||
// MaxParallelDownloads indicates the maximum layers to pull at the same time. A reasonable default is used if this is left as 0.
|
||||
MaxParallelDownloads uint
|
||||
|
||||
// When OptimizeDestinationImageAlreadyExists is set, optimize the copy assuming that the destination image already
|
||||
// exists (and is equivalent). Making the eventual (no-op) copy more performant for this case. Enabling the option
|
||||
// is slightly pessimistic if the destination image doesn't exist, or is not equivalent.
|
||||
OptimizeDestinationImageAlreadyExists bool
|
||||
|
||||
// Download layer contents with "nondistributable" media types ("foreign" layers) and translate the layer media type
|
||||
// to not indicate "nondistributable".
|
||||
DownloadForeignLayers bool
|
||||
@@ -244,7 +244,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
|
||||
|
||||
dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
|
||||
return nil, errors.Wrapf(err, "initializing destination %s", transports.ImageName(destRef))
|
||||
}
|
||||
defer func() {
|
||||
if err := dest.Close(); err != nil {
|
||||
@@ -254,7 +254,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
|
||||
|
||||
rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
|
||||
return nil, errors.Wrapf(err, "initializing source %s", transports.ImageName(srcRef))
|
||||
}
|
||||
defer func() {
|
||||
if err := rawSource.Close(); err != nil {
|
||||
@@ -290,11 +290,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
|
||||
}
|
||||
// Default to using gzip compression unless specified otherwise.
|
||||
if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil {
|
||||
algo, err := compression.AlgorithmByName("gzip")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.compressionFormat = algo
|
||||
c.compressionFormat = compression.Gzip
|
||||
} else {
|
||||
c.compressionFormat = *options.DestinationCtx.CompressionFormat
|
||||
}
|
||||
@@ -306,7 +302,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
|
||||
unparsedToplevel := image.UnparsedInstance(rawSource, nil)
|
||||
multiImage, err := isMultiImage(ctx, unparsedToplevel)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef))
|
||||
return nil, errors.Wrapf(err, "determining manifest MIME type for %s", transports.ImageName(srcRef))
|
||||
}
|
||||
|
||||
if !multiImage {
|
||||
@@ -319,15 +315,15 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
|
||||
// matches the current system to copy, and copy it.
|
||||
mfest, manifestType, err := unparsedToplevel.Manifest(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Error reading manifest for %s", transports.ImageName(srcRef))
|
||||
return nil, errors.Wrapf(err, "reading manifest for %s", transports.ImageName(srcRef))
|
||||
}
|
||||
manifestList, err := manifest.ListFromBlob(mfest, manifestType)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Error parsing primary manifest as list for %s", transports.ImageName(srcRef))
|
||||
return nil, errors.Wrapf(err, "parsing primary manifest as list for %s", transports.ImageName(srcRef))
|
||||
}
|
||||
instanceDigest, err := manifestList.ChooseInstance(options.SourceCtx) // try to pick one that matches options.SourceCtx
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef))
|
||||
return nil, errors.Wrapf(err, "choosing an image from manifest list %s", transports.ImageName(srcRef))
|
||||
}
|
||||
logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest)
|
||||
unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)
|
||||
@@ -338,7 +334,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
|
||||
} else { /* options.ImageListSelection == CopyAllImages or options.ImageListSelection == CopySpecificImages, */
|
||||
// If we were asked to copy multiple images and can't, that's an error.
|
||||
if !supportsMultipleImages(c.dest) {
|
||||
return nil, errors.Errorf("Error copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name())
|
||||
return nil, errors.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name())
|
||||
}
|
||||
// Copy some or all of the images.
|
||||
switch options.ImageListSelection {
|
||||
@@ -347,13 +343,13 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
|
||||
case CopySpecificImages:
|
||||
logrus.Debugf("Source is a manifest list; copying some instances")
|
||||
}
|
||||
if copiedManifest, _, err = c.copyMultipleImages(ctx, policyContext, options, unparsedToplevel); err != nil {
|
||||
if copiedManifest, err = c.copyMultipleImages(ctx, policyContext, options, unparsedToplevel); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := c.dest.Commit(ctx, unparsedToplevel); err != nil {
|
||||
return nil, errors.Wrap(err, "Error committing the finished image")
|
||||
return nil, errors.Wrap(err, "committing the finished image")
|
||||
}
|
||||
|
||||
return copiedManifest, nil
|
||||
@@ -380,12 +376,12 @@ func supportsMultipleImages(dest types.ImageDestination) bool {
|
||||
func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src types.Image, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
|
||||
srcManifest, _, err := src.Manifest(ctx)
|
||||
if err != nil {
|
||||
return false, nil, "", "", errors.Wrapf(err, "Error reading manifest from image")
|
||||
return false, nil, "", "", errors.Wrapf(err, "reading manifest from image")
|
||||
}
|
||||
|
||||
srcManifestDigest, err := manifest.Digest(srcManifest)
|
||||
if err != nil {
|
||||
return false, nil, "", "", errors.Wrapf(err, "Error calculating manifest digest")
|
||||
return false, nil, "", "", errors.Wrapf(err, "calculating manifest digest")
|
||||
}
|
||||
|
||||
destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx)
|
||||
@@ -402,7 +398,7 @@ func compareImageDestinationManifestEqual(ctx context.Context, options *Options,
|
||||
|
||||
destManifestDigest, err := manifest.Digest(destManifest)
|
||||
if err != nil {
|
||||
return false, nil, "", "", errors.Wrapf(err, "Error calculating manifest digest")
|
||||
return false, nil, "", "", errors.Wrapf(err, "calculating manifest digest")
|
||||
}
|
||||
|
||||
logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest)
|
||||
@@ -416,15 +412,15 @@ func compareImageDestinationManifestEqual(ctx context.Context, options *Options,
|
||||
|
||||
// copyMultipleImages copies some or all of an image list's instances, using
|
||||
// policyContext to validate source image admissibility.
|
||||
func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, copiedManifestType string, retErr error) {
|
||||
func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, retErr error) {
|
||||
// Parse the list and get a copy of the original value after it's re-encoded.
|
||||
manifestList, manifestType, err := unparsedToplevel.Manifest(ctx)
|
||||
if err != nil {
|
||||
return nil, "", errors.Wrapf(err, "Error reading manifest list")
|
||||
return nil, errors.Wrapf(err, "reading manifest list")
|
||||
}
|
||||
originalList, err := manifest.ListFromBlob(manifestList, manifestType)
|
||||
if err != nil {
|
||||
return nil, "", errors.Wrapf(err, "Error parsing manifest list %q", string(manifestList))
|
||||
return nil, errors.Wrapf(err, "parsing manifest list %q", string(manifestList))
|
||||
}
|
||||
updatedList := originalList.Clone()
|
||||
|
||||
@@ -436,14 +432,14 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
|
||||
c.Printf("Getting image list signatures\n")
|
||||
s, err := c.rawSource.GetSignatures(ctx, nil)
|
||||
if err != nil {
|
||||
return nil, "", errors.Wrap(err, "Error reading signatures")
|
||||
return nil, errors.Wrap(err, "reading signatures")
|
||||
}
|
||||
sigs = s
|
||||
}
|
||||
if len(sigs) != 0 {
|
||||
c.Printf("Checking if image list destination supports signatures\n")
|
||||
if err := c.dest.SupportsSignatures(ctx); err != nil {
|
||||
return nil, "", errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
|
||||
return nil, errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
|
||||
}
|
||||
}
|
||||
canModifyManifestList := (len(sigs) == 0)
|
||||
@@ -458,11 +454,11 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
|
||||
}
|
||||
selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType)
|
||||
if err != nil {
|
||||
return nil, "", errors.Wrapf(err, "Error determining manifest list type to write to destination")
|
||||
return nil, errors.Wrapf(err, "determining manifest list type to write to destination")
|
||||
}
|
||||
if selectedListType != originalList.MIMEType() {
|
||||
if !canModifyManifestList {
|
||||
return nil, "", errors.Errorf("Error: manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", selectedListType)
|
||||
return nil, errors.Errorf("manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", selectedListType)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -487,7 +483,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
|
||||
if skip {
|
||||
update, err := updatedList.Instance(instanceDigest)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
return nil, err
|
||||
}
|
||||
logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
|
||||
// Record the digest/size/type of the manifest that we didn't copy.
|
||||
@@ -500,7 +496,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
|
||||
unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest)
|
||||
updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
return nil, err
|
||||
}
|
||||
instancesCopied++
|
||||
// Record the result of a possible conversion here.
|
||||
@@ -514,7 +510,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
|
||||
|
||||
// Now reset the digest/size/types of the manifests in the list to account for any conversions that we made.
|
||||
if err = updatedList.UpdateInstances(updates); err != nil {
|
||||
return nil, "", errors.Wrapf(err, "Error updating manifest list")
|
||||
return nil, errors.Wrapf(err, "updating manifest list")
|
||||
}
|
||||
|
||||
// Iterate through supported list types, preferred format first.
|
||||
@@ -529,7 +525,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
|
||||
if thisListType != updatedList.MIMEType() {
|
||||
attemptedList, err = updatedList.ConvertToMIMEType(thisListType)
|
||||
if err != nil {
|
||||
return nil, "", errors.Wrapf(err, "Error converting manifest list to list with MIME type %q", thisListType)
|
||||
return nil, errors.Wrapf(err, "converting manifest list to list with MIME type %q", thisListType)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -537,17 +533,17 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
|
||||
// by serializing them both so that we can compare them.
|
||||
attemptedManifestList, err := attemptedList.Serialize()
|
||||
if err != nil {
|
||||
return nil, "", errors.Wrapf(err, "Error encoding updated manifest list (%q: %#v)", updatedList.MIMEType(), updatedList.Instances())
|
||||
return nil, errors.Wrapf(err, "encoding updated manifest list (%q: %#v)", updatedList.MIMEType(), updatedList.Instances())
|
||||
}
|
||||
originalManifestList, err := originalList.Serialize()
|
||||
if err != nil {
|
||||
return nil, "", errors.Wrapf(err, "Error encoding original manifest list for comparison (%q: %#v)", originalList.MIMEType(), originalList.Instances())
|
||||
return nil, errors.Wrapf(err, "encoding original manifest list for comparison (%q: %#v)", originalList.MIMEType(), originalList.Instances())
|
||||
}
|
||||
|
||||
// If we can't just use the original value, but we have to change it, flag an error.
|
||||
if !bytes.Equal(attemptedManifestList, originalManifestList) {
|
||||
if !canModifyManifestList {
|
||||
return nil, "", errors.Errorf("Error: manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", thisListType)
|
||||
return nil, errors.Errorf(" manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", thisListType)
|
||||
}
|
||||
logrus.Debugf("Manifest list has been updated")
|
||||
} else {
|
||||
@@ -567,24 +563,24 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
|
||||
break
|
||||
}
|
||||
if errs != nil {
|
||||
return nil, "", fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", "))
|
||||
return nil, fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", "))
|
||||
}
|
||||
|
||||
// Sign the manifest list.
|
||||
if options.SignBy != "" {
|
||||
newSig, err := c.createSignature(manifestList, options.SignBy)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
return nil, err
|
||||
}
|
||||
sigs = append(sigs, newSig)
|
||||
}
|
||||
|
||||
c.Printf("Storing list signatures\n")
|
||||
if err := c.dest.PutSignatures(ctx, sigs, nil); err != nil {
|
||||
return nil, "", errors.Wrap(err, "Error writing signatures")
|
||||
return nil, errors.Wrap(err, "writing signatures")
|
||||
}
|
||||
|
||||
return manifestList, selectedListType, nil
|
||||
return manifestList, nil
|
||||
}
|
||||
|
||||
// copyOneImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
|
||||
@@ -595,7 +591,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
|
||||
multiImage, err := isMultiImage(ctx, unparsedImage)
|
||||
if err != nil {
|
||||
// FIXME FIXME: How to name a reference for the sub-image?
|
||||
return nil, "", "", errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
|
||||
return nil, "", "", errors.Wrapf(err, "determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
|
||||
}
|
||||
if multiImage {
|
||||
return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
|
||||
@@ -609,7 +605,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
|
||||
}
|
||||
src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
|
||||
if err != nil {
|
||||
return nil, "", "", errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
|
||||
return nil, "", "", errors.Wrapf(err, "initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
|
||||
}
|
||||
|
||||
// If the destination is a digested reference, make a note of that, determine what digest value we're
|
||||
@@ -621,20 +617,20 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
|
||||
destIsDigestedReference = true
|
||||
sourceManifest, _, err := src.Manifest(ctx)
|
||||
if err != nil {
|
||||
return nil, "", "", errors.Wrapf(err, "Error reading manifest from source image")
|
||||
return nil, "", "", errors.Wrapf(err, "reading manifest from source image")
|
||||
}
|
||||
matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest())
|
||||
if err != nil {
|
||||
return nil, "", "", errors.Wrapf(err, "Error computing digest of source image's manifest")
|
||||
return nil, "", "", errors.Wrapf(err, "computing digest of source image's manifest")
|
||||
}
|
||||
if !matches {
|
||||
manifestList, _, err := unparsedToplevel.Manifest(ctx)
|
||||
if err != nil {
|
||||
return nil, "", "", errors.Wrapf(err, "Error reading manifest from source image")
|
||||
return nil, "", "", errors.Wrapf(err, "reading manifest from source image")
|
||||
}
|
||||
matches, err = manifest.MatchesDigest(manifestList, digested.Digest())
|
||||
if err != nil {
|
||||
return nil, "", "", errors.Wrapf(err, "Error computing digest of source image's manifest")
|
||||
return nil, "", "", errors.Wrapf(err, "computing digest of source image's manifest")
|
||||
}
|
||||
if !matches {
|
||||
return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference")
|
||||
@@ -654,7 +650,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
|
||||
c.Printf("Getting image source signatures\n")
|
||||
s, err := src.Signatures(ctx)
|
||||
if err != nil {
|
||||
return nil, "", "", errors.Wrap(err, "Error reading signatures")
|
||||
return nil, "", "", errors.Wrap(err, "reading signatures")
|
||||
}
|
||||
sigs = s
|
||||
}
|
||||
@@ -789,7 +785,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
|
||||
|
||||
c.Printf("Storing signatures\n")
|
||||
if err := c.dest.PutSignatures(ctx, sigs, targetInstance); err != nil {
|
||||
return nil, "", "", errors.Wrap(err, "Error writing signatures")
|
||||
return nil, "", "", errors.Wrap(err, "writing signatures")
|
||||
}
|
||||
|
||||
return manifestBytes, retManifestType, retManifestDigest, nil
|
||||
@@ -809,11 +805,11 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
|
||||
if dest.MustMatchRuntimeOS() {
|
||||
c, err := src.OCIConfig(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error parsing image configuration")
|
||||
return errors.Wrapf(err, "parsing image configuration")
|
||||
}
|
||||
wantedPlatforms, err := platform.WantedPlatforms(sys)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error getting current platform information %#v", sys)
|
||||
return errors.Wrapf(err, "getting current platform information %#v", sys)
|
||||
}
|
||||
|
||||
options := newOrderedSet()
|
||||
@@ -864,7 +860,7 @@ func (ic *imageCopier) noPendingManifestUpdates() bool {
|
||||
// isTTY returns true if the io.Writer is a file and a tty.
|
||||
func isTTY(w io.Writer) bool {
|
||||
if f, ok := w.(*os.File); ok {
|
||||
return terminal.IsTerminal(int(f.Fd()))
|
||||
return term.IsTerminal(int(f.Fd()))
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -893,6 +889,18 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
|
||||
err error
|
||||
}
|
||||
|
||||
// The manifest is used to extract the information whether a given
|
||||
// layer is empty.
|
||||
manifestBlob, manifestType, err := ic.src.Manifest(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
man, err := manifest.FromBlob(manifestBlob, manifestType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
manifestLayerInfos := man.LayerInfos()
|
||||
|
||||
// copyGroup is used to determine if all layers are copied
|
||||
copyGroup := sync.WaitGroup{}
|
||||
|
||||
@@ -925,7 +933,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
|
||||
logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
|
||||
}
|
||||
} else {
|
||||
cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index, srcRef)
|
||||
cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index, srcRef, manifestLayerInfos[index].EmptyLayer)
|
||||
}
|
||||
data[index] = cld
|
||||
}
|
||||
@@ -1026,13 +1034,13 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
|
||||
}
|
||||
pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates)
|
||||
if err != nil {
|
||||
return nil, "", errors.Wrap(err, "Error creating an updated image manifest")
|
||||
return nil, "", errors.Wrap(err, "creating an updated image manifest")
|
||||
}
|
||||
pendingImage = pi
|
||||
}
|
||||
man, _, err := pendingImage.Manifest(ctx)
|
||||
if err != nil {
|
||||
return nil, "", errors.Wrap(err, "Error reading manifest")
|
||||
return nil, "", errors.Wrap(err, "reading manifest")
|
||||
}
|
||||
|
||||
if err := ic.c.copyConfig(ctx, pendingImage); err != nil {
|
||||
@@ -1048,7 +1056,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
|
||||
instanceDigest = &manifestDigest
|
||||
}
|
||||
if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil {
|
||||
return nil, "", errors.Wrapf(err, "Error writing manifest %q", string(man))
|
||||
return nil, "", errors.Wrapf(err, "writing manifest %q", string(man))
|
||||
}
|
||||
return man, manifestDigest, nil
|
||||
}
|
||||
@@ -1064,9 +1072,25 @@ func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
|
||||
}
|
||||
}
|
||||
|
||||
// customPartialBlobCounter provides a decorator function for the partial blobs retrieval progress bar
func customPartialBlobCounter(filler interface{}, wcc ...decor.WC) decor.Decorator {
	producer := func(filler interface{}) decor.DecorFunc {
		return func(s decor.Statistics) string {
			if s.Total == 0 {
				pairFmt := "%.1f / %.1f (skipped: %.1f)"
				return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill))
			}
			pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)"
			percentage := 100.0 * float64(s.Refill) / float64(s.Total)
			return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage)
		}
	}
	return decor.Any(producer(filler), wcc...)
}
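As a rough stand-alone illustration (not part of the diff) of how a decorator built this way plugs into a bar, assuming the vbauerster/mpb/v7 API already used by this file:

package main

import (
	"fmt"
	"time"

	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func main() {
	pool := mpb.New()
	// decor.Any wraps a plain func(decor.Statistics) string into a Decorator,
	// which is what customPartialBlobCounter does above.
	counter := decor.Any(func(s decor.Statistics) string {
		return fmt.Sprintf("%.1f / %.1f (skipped: %.1f)",
			decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill))
	})
	bar := pool.AddBar(100, mpb.AppendDecorators(counter))
	for i := 0; i < 100; i++ {
		bar.Increment()
		time.Sleep(time.Millisecond)
	}
	pool.Wait()
}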
|
||||
|
||||
// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
|
||||
// is ioutil.Discard, the progress bar's output will be discarded
|
||||
func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
|
||||
func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
|
||||
// shortDigestLen is the length of the digest used for blobs.
|
||||
const shortDigestLen = 12
|
||||
|
||||
@@ -1083,19 +1107,32 @@ func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind
|
||||
// Use a normal progress bar when we know the size (i.e., size > 0).
|
||||
// Otherwise, use a spinner to indicate that something's happening.
|
||||
var bar *mpb.Bar
|
||||
sstyle := mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft()
|
||||
if info.Size > 0 {
|
||||
bar = pool.AddBar(info.Size,
|
||||
mpb.BarFillerClearOnComplete(),
|
||||
mpb.PrependDecorators(
|
||||
decor.OnComplete(decor.Name(prefix), onComplete),
|
||||
),
|
||||
mpb.AppendDecorators(
|
||||
decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
|
||||
),
|
||||
)
|
||||
if partial {
|
||||
bar = pool.AddBar(info.Size,
|
||||
mpb.BarFillerClearOnComplete(),
|
||||
mpb.PrependDecorators(
|
||||
decor.OnComplete(decor.Name(prefix), onComplete),
|
||||
),
|
||||
mpb.AppendDecorators(
|
||||
customPartialBlobCounter(sstyle.Build()),
|
||||
),
|
||||
)
|
||||
} else {
|
||||
bar = pool.AddBar(info.Size,
|
||||
mpb.BarFillerClearOnComplete(),
|
||||
mpb.PrependDecorators(
|
||||
decor.OnComplete(decor.Name(prefix), onComplete),
|
||||
),
|
||||
mpb.AppendDecorators(
|
||||
decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
|
||||
),
|
||||
)
|
||||
}
|
||||
} else {
|
||||
bar = pool.Add(0,
|
||||
mpb.NewSpinnerFiller([]string{".", "..", "...", "....", ""}, mpb.SpinnerOnLeft),
|
||||
sstyle.Build(),
|
||||
mpb.BarFillerClearOnComplete(),
|
||||
mpb.PrependDecorators(
|
||||
decor.OnComplete(decor.Name(prefix), onComplete),
|
||||
@@ -1114,14 +1151,14 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
|
||||
if srcInfo.Digest != "" {
|
||||
configBlob, err := src.ConfigBlob(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
|
||||
return errors.Wrapf(err, "reading config blob %s", srcInfo.Digest)
|
||||
}
|
||||
|
||||
destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
|
||||
progressPool, progressCleanup := c.newProgressPool(ctx)
|
||||
defer progressCleanup()
|
||||
bar := c.createProgressBar(progressPool, srcInfo, "config", "done")
|
||||
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1)
|
||||
bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done")
|
||||
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1, false)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
}
|
||||
@@ -1148,7 +1185,7 @@ type diffIDResult struct {
|
||||
// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
|
||||
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
|
||||
// srcRef can be used as an additional hint to the destination during checking whehter a layer can be reused but srcRef can be nil.
|
||||
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named) (types.BlobInfo, digest.Digest, error) {
|
||||
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named, emptyLayer bool) (types.BlobInfo, digest.Digest, error) {
|
||||
// If the srcInfo doesn't contain compression information, try to compute it from the
|
||||
// MediaType, which was either read from a manifest by way of LayerInfos() or constructed
|
||||
// by LayerInfosForCopy(), if it was supplied at all. If we succeed in copying the blob,
|
||||
@@ -1195,21 +1232,20 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
|
||||
Cache: ic.c.blobInfoCache,
|
||||
CanSubstitute: ic.canSubstituteBlobs,
|
||||
SrcRef: srcRef,
|
||||
EmptyLayer: emptyLayer,
|
||||
}
|
||||
if enableEarlyCommit {
|
||||
options.LayerIndex = &layerIndex
|
||||
}
|
||||
options.LayerIndex = &layerIndex
|
||||
reused, blobInfo, err = dest.TryReusingBlobWithOptions(ctx, srcInfo, options)
|
||||
} else {
|
||||
reused, blobInfo, err = ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
|
||||
return types.BlobInfo{}, "", errors.Wrapf(err, "trying to reuse blob %s at destination", srcInfo.Digest)
|
||||
}
|
||||
if reused {
|
||||
logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
|
||||
bar := ic.c.createProgressBar(pool, srcInfo, "blob", "skipped: already exists")
|
||||
bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "skipped: already exists")
|
||||
bar.SetTotal(0, true)
|
||||
|
||||
// Throw an event that the layer has been skipped
|
||||
@@ -1236,16 +1272,59 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
|
||||
}
|
||||
}
|
||||
|
||||
// A partial pull is managed by the destination storage, that decides what portions
|
||||
// of the source file are not known yet and must be fetched.
|
||||
// Attempt a partial only when the source allows to retrieve a blob partially and
|
||||
// the destination has support for it.
|
||||
imgSource, okSource := ic.c.rawSource.(internalTypes.ImageSourceSeekable)
|
||||
imgDest, okDest := ic.c.dest.(internalTypes.ImageDestinationPartial)
|
||||
if okSource && okDest && !diffIDIsNeeded {
|
||||
bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
|
||||
|
||||
progress := make(chan int64)
|
||||
terminate := make(chan interface{})
|
||||
|
||||
defer close(terminate)
|
||||
defer close(progress)
|
||||
|
||||
proxy := imageSourceSeekableProxy{
|
||||
source: imgSource,
|
||||
progress: progress,
|
||||
}
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case written := <-progress:
|
||||
bar.IncrInt64(written)
|
||||
case <-terminate:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
bar.SetTotal(srcInfo.Size, false)
|
||||
info, err := imgDest.PutBlobPartial(ctx, proxy, srcInfo, ic.c.blobInfoCache)
|
||||
if err == nil {
|
||||
bar.SetRefill(srcInfo.Size - bar.Current())
|
||||
bar.SetCurrent(srcInfo.Size)
|
||||
bar.SetTotal(srcInfo.Size, true)
|
||||
logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
|
||||
return info, cachedDiffID, nil
|
||||
}
|
||||
bar.Abort(true)
|
||||
logrus.Debugf("Failed to retrieve partial blob: %v", err)
|
||||
}
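The comment above describes the gating: a partial pull is only attempted when the source can serve byte ranges and the destination can assemble a blob from them, and any failure falls back to a full copy. A minimal stand-alone sketch of that capability probe; the interface names are illustrative, not the vendored internal types:

package blobcopy

type imageSource interface{ GetBlob() []byte }
type imageDestination interface{ PutBlob([]byte) }

// Optional capabilities, advertised by implementing additional interfaces
// (the vendored code uses internal types named ImageSourceSeekable and
// ImageDestinationPartial for the same purpose).
type seekableSource interface {
	GetBlobAt(offset, length int64) ([]byte, error)
}
type partialDestination interface {
	PutBlobPartial(src seekableSource) error
}

// copyBlob tries the partial path only when both optional interfaces are
// present, and falls back to a full copy when the attempt fails.
func copyBlob(src imageSource, dest imageDestination) {
	if s, okSrc := src.(seekableSource); okSrc {
		if d, okDest := dest.(partialDestination); okDest {
			if err := d.PutBlobPartial(s); err == nil {
				return // partial pull succeeded
			}
			// fall through to the ordinary full copy, as the diff does on error
		}
	}
	dest.PutBlob(src.GetBlob())
}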
|
||||
|
||||
// Fallback: copy the layer, computing the diffID if we need to do so
|
||||
srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
|
||||
return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
|
||||
}
|
||||
defer srcStream.Close()
|
||||
|
||||
bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done")
|
||||
bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
|
||||
|
||||
blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex)
|
||||
blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", err
|
||||
}
|
||||
@@ -1257,7 +1336,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
|
||||
return types.BlobInfo{}, "", ctx.Err()
|
||||
case diffIDResult := <-diffIDChan:
|
||||
if diffIDResult.err != nil {
|
||||
return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
|
||||
return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "computing layer DiffID")
|
||||
}
|
||||
logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
|
||||
// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
|
||||
@@ -1276,11 +1355,11 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
|
||||
// perhaps (de/re/)compressing the stream,
|
||||
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
|
||||
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
|
||||
diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int) (types.BlobInfo, <-chan diffIDResult, error) {
|
||||
var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
|
||||
diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
|
||||
var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
|
||||
var diffIDChan chan diffIDResult
|
||||
|
||||
err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below
|
||||
err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below
|
||||
if diffIDIsNeeded {
|
||||
diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
@@ -1288,7 +1367,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
|
||||
_ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
|
||||
}()
|
||||
|
||||
getDiffIDRecorder = func(decompressor compression.DecompressorFunc) io.Writer {
|
||||
getDiffIDRecorder = func(decompressor compressiontypes.DecompressorFunc) io.Writer {
|
||||
// If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
|
||||
// reading from the pipe has failed, we don’t really care.
|
||||
// We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
|
||||
@@ -1301,13 +1380,13 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
|
||||
}
|
||||
}
|
||||
|
||||
blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, toEncrypt, bar, layerIndex) // Sets err to nil on success
|
||||
blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success
|
||||
return blobInfo, diffIDChan, err
|
||||
// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
|
||||
}
|
||||
|
||||
// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest.
|
||||
func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compression.DecompressorFunc) {
|
||||
func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) {
|
||||
result := diffIDResult{
|
||||
digest: "",
|
||||
err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
|
||||
@@ -1319,7 +1398,7 @@ func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadClo
|
||||
}
|
||||
|
||||
// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
|
||||
func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) (digest.Digest, error) {
|
||||
func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) {
|
||||
if decompressor != nil {
|
||||
s, err := decompressor(stream)
|
||||
if err != nil {
|
||||
@@ -1342,7 +1421,7 @@ type errorAnnotationReader struct {
|
||||
func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
|
||||
n, err = r.reader.Read(b)
|
||||
if err != io.EOF {
|
||||
return n, errors.Wrapf(err, "error happened during read")
|
||||
return n, errors.Wrapf(err, "happened during read")
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
@@ -1352,8 +1431,8 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
|
||||
// perhaps (de/re/)compressing it if canModifyBlob,
|
||||
// and returns a complete blobInfo of the copied blob.
|
||||
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
|
||||
getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
|
||||
canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int) (types.BlobInfo, error) {
|
||||
getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer,
|
||||
canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
|
||||
if isConfig { // This is guaranteed by the caller, but set it here to be explicit.
|
||||
canModifyBlob = false
|
||||
}
|
||||
@@ -1369,7 +1448,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
// read stream to the end, and validation does not happen.
|
||||
digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest)
|
||||
return types.BlobInfo{}, errors.Wrapf(err, "preparing to verify blob %s", srcInfo.Digest)
|
||||
}
|
||||
var destStream io.Reader = digestingReader
|
||||
|
||||
@@ -1383,7 +1462,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
var d digest.Digest
|
||||
destStream, d, err = ocicrypt.DecryptLayer(c.ociDecryptConfig, destStream, newDesc, false)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, errors.Wrapf(err, "Error decrypting layer %s", srcInfo.Digest)
|
||||
return types.BlobInfo{}, errors.Wrapf(err, "decrypting layer %s", srcInfo.Digest)
|
||||
}
|
||||
|
||||
srcInfo.Digest = d
|
||||
@@ -1400,7 +1479,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
|
||||
compressionFormat, decompressor, destStream, err := compression.DetectCompressionFormat(destStream) // We could skip this in some cases, but let's keep the code path uniform
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
|
||||
return types.BlobInfo{}, errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
|
||||
}
|
||||
isCompressed := decompressor != nil
|
||||
if expectedCompressionFormat, known := expectedCompressionFormats[srcInfo.MediaType]; known && isCompressed && compressionFormat.Name() != expectedCompressionFormat.Name() {
|
||||
@@ -1417,6 +1496,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
originalLayerReader = destStream
|
||||
}
|
||||
|
||||
compressionMetadata := map[string]string{}
|
||||
// === Deal with layer compression/decompression if necessary
|
||||
// WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
|
||||
// short-circuit conditions
|
||||
@@ -1445,7 +1525,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
|
||||
// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
|
||||
// we don’t care.
|
||||
go c.compressGoroutine(pipeWriter, destStream, *uploadCompressionFormat) // Closes pipeWriter
|
||||
go c.compressGoroutine(pipeWriter, destStream, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter
|
||||
destStream = pipeReader
|
||||
inputInfo.Digest = ""
|
||||
inputInfo.Size = -1
|
||||
@@ -1465,7 +1545,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
defer pipeReader.Close()
|
||||
|
||||
go c.compressGoroutine(pipeWriter, s, *uploadCompressionFormat) // Closes pipeWriter
|
||||
go c.compressGoroutine(pipeWriter, s, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter
|
||||
|
||||
destStream = pipeReader
|
||||
inputInfo.Digest = ""
|
||||
@@ -1525,7 +1605,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
|
||||
s, fin, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, destStream, desc)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, errors.Wrapf(err, "Error encrypting blob %s", srcInfo.Digest)
|
||||
return types.BlobInfo{}, errors.Wrapf(err, "encrypting blob %s", srcInfo.Digest)
|
||||
}
|
||||
|
||||
destStream = s
|
||||
@@ -1556,10 +1636,11 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
dest, ok := c.dest.(internalTypes.ImageDestinationWithOptions)
|
||||
if ok {
|
||||
options := internalTypes.PutBlobOptions{
|
||||
Cache: c.blobInfoCache,
|
||||
IsConfig: isConfig,
|
||||
Cache: c.blobInfoCache,
|
||||
IsConfig: isConfig,
|
||||
EmptyLayer: emptyLayer,
|
||||
}
|
||||
if !isConfig && enableEarlyCommit {
|
||||
if !isConfig {
|
||||
options.LayerIndex = &layerIndex
|
||||
}
|
||||
uploadedInfo, err = dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options)
|
||||
@@ -1567,7 +1648,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
uploadedInfo, err = c.dest.PutBlob(ctx, &errorAnnotationReader{destStream}, inputInfo, c.blobInfoCache, isConfig)
|
||||
}
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
|
||||
return types.BlobInfo{}, errors.Wrap(err, "writing blob")
|
||||
}
|
||||
|
||||
uploadedInfo.Annotations = srcInfo.Annotations
|
||||
@@ -1599,7 +1680,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
|
||||
_, err := io.Copy(ioutil.Discard, originalLayerReader)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, errors.Wrapf(err, "Error reading input blob %s", srcInfo.Digest)
|
||||
return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1631,23 +1712,42 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, srcCompressorName)
|
||||
}
|
||||
}
|
||||
|
||||
// Copy all the metadata generated by the compressor into the annotations.
|
||||
if uploadedInfo.Annotations == nil {
|
||||
uploadedInfo.Annotations = map[string]string{}
|
||||
}
|
||||
for k, v := range compressionMetadata {
|
||||
uploadedInfo.Annotations[k] = v
|
||||
}
|
||||
|
||||
return uploadedInfo, nil
|
||||
}
|
||||
|
||||
// doCompression reads all input from src and writes its compressed equivalent to dest.
|
||||
func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error {
|
||||
compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buf := make([]byte, compressionBufferSize)
|
||||
|
||||
_, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close()
|
||||
if err != nil {
|
||||
compressor.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
return compressor.Close()
|
||||
}
|
||||
|
||||
// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
|
||||
func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, compressionFormat compression.Algorithm) {
|
||||
func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
|
||||
err := errors.New("Internal error: unexpected panic in compressGoroutine")
|
||||
defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
|
||||
_ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
|
||||
}()
|
||||
|
||||
compressor, err := compression.CompressStream(dest, compressionFormat, c.compressionLevel)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer compressor.Close()
|
||||
|
||||
buf := make([]byte, compressionBufferSize)
|
||||
|
||||
_, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close()
|
||||
err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel)
|
||||
}
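doCompression and compressGoroutine together implement a compress-through-a-pipe pattern: the goroutine writes the compressed stream into an io.PipeWriter and propagates success or failure via CloseWithError, while the uploader simply reads the other end. A self-contained sketch of the same pattern, using stdlib gzip as a stand-in for the pluggable compression package used here:

package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

// compressInto mirrors the shape of compressGoroutine/doCompression above:
// write the compressed stream into the pipe and report the outcome through
// CloseWithError (CloseWithError(nil) behaves like Close()).
func compressInto(dest *io.PipeWriter, src io.Reader) {
	var err error
	defer func() { _ = dest.CloseWithError(err) }()
	gz := gzip.NewWriter(dest)
	if _, err = io.Copy(gz, src); err != nil {
		gz.Close()
		return
	}
	err = gz.Close()
}

func main() {
	pr, pw := io.Pipe()
	go compressInto(pw, strings.NewReader("layer contents"))
	n, err := io.Copy(io.Discard, pr) // the upload side simply reads the pipe
	fmt.Println(n, err)
}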
|
||||
|
||||
33
vendor/github.com/containers/image/v5/copy/manifest.go
generated
vendored
@@ -45,7 +45,7 @@ func (os *orderedSet) append(s string) {
|
||||
func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string, requiresOciEncryption bool) (string, []string, error) {
|
||||
_, srcType, err := ic.src.Manifest(ctx)
|
||||
if err != nil { // This should have been cached?!
|
||||
return "", nil, errors.Wrap(err, "Error reading manifest")
|
||||
return "", nil, errors.Wrap(err, "reading manifest")
|
||||
}
|
||||
normalizedSrcType := manifest.NormalizedMIMEType(srcType)
|
||||
if srcType != normalizedSrcType {
|
||||
@@ -137,30 +137,29 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport
|
||||
if forcedListMIMEType != "" {
|
||||
destSupportedMIMETypes = []string{forcedListMIMEType}
|
||||
}
|
||||
var selectedType string
|
||||
var otherSupportedTypes []string
|
||||
for i := range destSupportedMIMETypes {
|
||||
// The second priority is the first member of the list of acceptable types that is a list,
|
||||
// but keep going in case current type occurs later in the list.
|
||||
if selectedType == "" && manifest.MIMETypeIsMultiImage(destSupportedMIMETypes[i]) {
|
||||
selectedType = destSupportedMIMETypes[i]
|
||||
}
|
||||
// The first priority is the current type, if it's in the list, since that lets us avoid a
|
||||
// conversion that isn't strictly necessary.
|
||||
if destSupportedMIMETypes[i] == currentListMIMEType {
|
||||
selectedType = destSupportedMIMETypes[i]
|
||||
|
||||
prioritizedTypes := newOrderedSet()
|
||||
// The first priority is the current type, if it's in the list, since that lets us avoid a
|
||||
// conversion that isn't strictly necessary.
|
||||
for _, t := range destSupportedMIMETypes {
|
||||
if t == currentListMIMEType {
|
||||
prioritizedTypes.append(currentListMIMEType)
|
||||
break
|
||||
}
|
||||
}
|
||||
// Pick out the other list types that we support.
|
||||
for i := range destSupportedMIMETypes {
|
||||
if selectedType != destSupportedMIMETypes[i] && manifest.MIMETypeIsMultiImage(destSupportedMIMETypes[i]) {
|
||||
otherSupportedTypes = append(otherSupportedTypes, destSupportedMIMETypes[i])
|
||||
for _, t := range destSupportedMIMETypes {
|
||||
if manifest.MIMETypeIsMultiImage(t) {
|
||||
prioritizedTypes.append(t)
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("Manifest list has MIME type %s, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", "))
|
||||
if selectedType == "" {
|
||||
if len(prioritizedTypes.list) == 0 {
|
||||
return "", nil, errors.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
|
||||
}
|
||||
selectedType := prioritizedTypes.list[0]
|
||||
otherSupportedTypes := prioritizedTypes.list[1:]
|
||||
if selectedType != currentListMIMEType {
|
||||
logrus.Debugf("... will convert to %s first, and then try %v", selectedType, otherSupportedTypes)
|
||||
} else {
|
||||
|
||||
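The rewritten determineListConversion above builds its candidate order with an ordered set: the current list type first, if the destination supports it, then every other supported manifest-list type in the destination's own order. A small stand-alone sketch of that prioritization; the helper names are illustrative:

package listconv

// prioritize mirrors the prioritizedTypes logic above: the current MIME type
// comes first when the destination supports it, followed by the remaining
// supported manifest-list types, without duplicates.
func prioritize(current string, destSupported []string, isListType func(string) bool) []string {
	seen := map[string]bool{}
	out := []string{}
	appendOnce := func(t string) {
		if !seen[t] {
			seen[t] = true
			out = append(out, t)
		}
	}
	for _, t := range destSupported {
		if t == current {
			appendOnce(current) // first priority: avoid an unnecessary conversion
			break
		}
	}
	for _, t := range destSupported {
		if isListType(t) {
			appendOnce(t)
		}
	}
	return out
}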
25
vendor/github.com/containers/image/v5/copy/progress_reader.go
generated
vendored
@@ -1,9 +1,11 @@
|
||||
package copy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
internalTypes "github.com/containers/image/v5/internal/types"
|
||||
"github.com/containers/image/v5/types"
|
||||
)
|
||||
|
||||
@@ -77,3 +79,26 @@ func (r *progressReader) Read(p []byte) (int, error) {
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// imageSourceSeekableProxy wraps ImageSourceSeekable and keeps track of how many bytes
// are received.
type imageSourceSeekableProxy struct {
	// source is the seekable input to read from.
	source internalTypes.ImageSourceSeekable
	// progress is the chan where the total number of bytes read so far are reported.
	progress chan int64
}

// GetBlobAt reads from the ImageSourceSeekable and report how many bytes were received
// to the progress chan.
func (s imageSourceSeekableProxy) GetBlobAt(ctx context.Context, bInfo types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
	rc, errs, err := s.source.GetBlobAt(ctx, bInfo, chunks)
	if err == nil {
		total := int64(0)
		for _, c := range chunks {
			total += int64(c.Length)
		}
		s.progress <- total
	}
	return rc, errs, err
}
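The proxy is a thin counting wrapper: forward the call and, on success, report the requested byte count on a channel that the progress-bar goroutine in copy.go drains. A generic sketch of the same wrapping idea with hypothetical names:

package progressproxy

// chunkReader is a hypothetical stand-in for the seekable blob source.
type chunkReader interface {
	ReadChunk(offset, length int64) ([]byte, error)
}

// countingProxy forwards every call to the wrapped reader and, on success,
// reports the requested byte count on the progress channel.
type countingProxy struct {
	inner    chunkReader
	progress chan<- int64
}

func (p countingProxy) ReadChunk(offset, length int64) ([]byte, error) {
	data, err := p.inner.ReadChunk(offset, length)
	if err == nil {
		p.progress <- length
	}
	return data, err
}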
|
||||
|
||||
4
vendor/github.com/containers/image/v5/copy/sign.go
generated
vendored
@@ -10,7 +10,7 @@ import (
|
||||
func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) {
|
||||
mech, err := signature.NewGPGSigningMechanism()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error initializing GPG")
|
||||
return nil, errors.Wrap(err, "initializing GPG")
|
||||
}
|
||||
defer mech.Close()
|
||||
if err := mech.SupportsSigning(); err != nil {
|
||||
@@ -25,7 +25,7 @@ func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, e
|
||||
c.Printf("Signing manifest\n")
|
||||
newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error creating signature")
|
||||
return nil, errors.Wrap(err, "creating signature")
|
||||
}
|
||||
return newSig, nil
|
||||
}
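A condensed sketch of the signing flow createSignature implements, using only the containers/image/v5/signature calls visible in this diff; the wrapper function itself is hypothetical:

package signsketch

import "github.com/containers/image/v5/signature"

// signManifest opens a GPG mechanism, verifies that signing is possible, then
// signs the manifest for the given docker reference and key identity.
func signManifest(manifest []byte, dockerReference, keyIdentity string) ([]byte, error) {
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		return nil, err
	}
	defer mech.Close()
	if err := mech.SupportsSigning(); err != nil {
		return nil, err
	}
	return signature.SignDockerManifest(manifest, dockerReference, mech, keyIdentity)
}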
|
||||
|
||||
37
vendor/github.com/containers/image/v5/directory/directory_dest.go
generated
vendored
@@ -21,20 +21,33 @@ const version = "Directory Transport Version: 1.1\n"
|
||||
var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data")
|
||||
|
||||
type dirImageDestination struct {
|
||||
ref dirReference
|
||||
compress bool
|
||||
ref dirReference
|
||||
desiredLayerCompression types.LayerCompression
|
||||
}
|
||||
|
||||
// newImageDestination returns an ImageDestination for writing to a directory.
|
||||
func newImageDestination(ref dirReference, compress bool) (types.ImageDestination, error) {
|
||||
d := &dirImageDestination{ref: ref, compress: compress}
|
||||
func newImageDestination(sys *types.SystemContext, ref dirReference) (types.ImageDestination, error) {
|
||||
desiredLayerCompression := types.PreserveOriginal
|
||||
if sys != nil {
|
||||
if sys.DirForceCompress {
|
||||
desiredLayerCompression = types.Compress
|
||||
|
||||
if sys.DirForceDecompress {
|
||||
return nil, errors.Errorf("Cannot compress and decompress at the same time")
|
||||
}
|
||||
}
|
||||
if sys.DirForceDecompress {
|
||||
desiredLayerCompression = types.Decompress
|
||||
}
|
||||
}
|
||||
d := &dirImageDestination{ref: ref, desiredLayerCompression: desiredLayerCompression}
|
||||
|
||||
// If directory exists check if it is empty
|
||||
// if not empty, check whether the contents match that of a container image directory and overwrite the contents
|
||||
// if the contents don't match throw an error
|
||||
dirExists, err := pathExists(d.ref.resolvedPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error checking for path %q", d.ref.resolvedPath)
|
||||
return nil, errors.Wrapf(err, "checking for path %q", d.ref.resolvedPath)
|
||||
}
|
||||
if dirExists {
|
||||
isEmpty, err := isDirEmpty(d.ref.resolvedPath)
|
||||
@@ -45,7 +58,7 @@ func newImageDestination(ref dirReference, compress bool) (types.ImageDestinatio
|
||||
if !isEmpty {
|
||||
versionExists, err := pathExists(d.ref.versionPath())
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error checking if path exists %q", d.ref.versionPath())
|
||||
return nil, errors.Wrapf(err, "checking if path exists %q", d.ref.versionPath())
|
||||
}
|
||||
if versionExists {
|
||||
contents, err := ioutil.ReadFile(d.ref.versionPath())
|
||||
@@ -61,7 +74,7 @@ func newImageDestination(ref dirReference, compress bool) (types.ImageDestinatio
|
||||
}
|
||||
// delete directory contents so that only one image is in the directory at a time
|
||||
if err = removeDirContents(d.ref.resolvedPath); err != nil {
|
||||
return nil, errors.Wrapf(err, "error erasing contents in %q", d.ref.resolvedPath)
|
||||
return nil, errors.Wrapf(err, "erasing contents in %q", d.ref.resolvedPath)
|
||||
}
|
||||
logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath)
|
||||
}
|
||||
@@ -74,7 +87,7 @@ func newImageDestination(ref dirReference, compress bool) (types.ImageDestinatio
|
||||
// create version file
|
||||
err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error creating version file %q", d.ref.versionPath())
|
||||
return nil, errors.Wrapf(err, "creating version file %q", d.ref.versionPath())
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
@@ -101,10 +114,7 @@ func (d *dirImageDestination) SupportsSignatures(ctx context.Context) error {
|
||||
}
|
||||
|
||||
func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression {
|
||||
if d.compress {
|
||||
return types.Compress
|
||||
}
|
||||
return types.PreserveOriginal
|
||||
return d.desiredLayerCompression
|
||||
}
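The new constructor collapses the two SystemContext flags into one desired compression value and rejects the contradictory combination, so DesiredLayerCompression can simply return the stored value. A tiny stand-alone sketch of that mapping, with local types rather than the vendored ones:

package dirdest

import "errors"

type layerCompression int

const (
	preserveOriginal layerCompression = iota
	compress
	decompress
)

// desiredCompression maps the two directory-transport flags onto a single
// desired value and rejects the contradictory combination, as the new
// constructor above does.
func desiredCompression(forceCompress, forceDecompress bool) (layerCompression, error) {
	switch {
	case forceCompress && forceDecompress:
		return preserveOriginal, errors.New("cannot compress and decompress at the same time")
	case forceCompress:
		return compress, nil
	case forceDecompress:
		return decompress, nil
	default:
		return preserveOriginal, nil
	}
}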
|
||||
|
||||
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
|
||||
@@ -239,6 +249,9 @@ func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]
|
||||
}
|
||||
|
||||
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
|
||||
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
|
||||
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
|
||||
// original manifest list digest, if desired.
|
||||
// WARNING: This does not have any transactional semantics:
|
||||
// - Uploaded data MAY be visible to others before Commit() is called
|
||||
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
|
||||
|
||||
6
vendor/github.com/containers/image/v5/directory/directory_transport.go
generated
vendored
@@ -153,11 +153,7 @@ func (ref dirReference) NewImageSource(ctx context.Context, sys *types.SystemCon
|
||||
// NewImageDestination returns a types.ImageDestination for this reference.
|
||||
// The caller must call .Close() on the returned ImageDestination.
|
||||
func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
|
||||
compress := false
|
||||
if sys != nil {
|
||||
compress = sys.DirForceCompress
|
||||
}
|
||||
return newImageDestination(ref, compress)
|
||||
return newImageDestination(sys, ref)
|
||||
}
|
||||
|
||||
// DeleteImage deletes the named image from the registry, if supported.
|
||||
|
||||
3
vendor/github.com/containers/image/v5/docker/archive/dest.go
generated
vendored
@@ -67,6 +67,9 @@ func (d *archiveImageDestination) Close() error {
|
||||
}
|
||||
|
||||
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
|
||||
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
|
||||
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
|
||||
// original manifest list digest, if desired.
|
||||
// WARNING: This does not have any transactional semantics:
|
||||
// - Uploaded data MAY be visible to others before Commit() is called
|
||||
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
|
||||
|
||||
4
vendor/github.com/containers/image/v5/docker/archive/reader.go
generated
vendored
@@ -81,14 +81,14 @@ func (r *Reader) List() ([][]types.ImageReference, error) {
|
||||
}
|
||||
ref, err := newReference(r.path, nt, -1, r.archive, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Error creating a reference for tag %#v in manifest item @%d", tag, imageIndex)
|
||||
return nil, errors.Wrapf(err, "creating a reference for tag %#v in manifest item @%d", tag, imageIndex)
|
||||
}
|
||||
refs = append(refs, ref)
|
||||
}
|
||||
if len(refs) == 0 {
|
||||
ref, err := newReference(r.path, nil, imageIndex, r.archive, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Error creating a reference for manifest item @%d", imageIndex)
|
||||
return nil, errors.Wrapf(err, "creating a reference for manifest item @%d", imageIndex)
|
||||
}
|
||||
refs = append(refs, ref)
|
||||
}
|
||||
|
||||
4
vendor/github.com/containers/image/v5/docker/archive/writer.go
generated
vendored
@@ -60,7 +60,7 @@ func openArchiveForWriting(path string) (*os.File, error) {
|
||||
// only in a different way. Either way, it’s up to the user to not have two writers to the same path.)
|
||||
fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error opening file %q", path)
|
||||
return nil, errors.Wrapf(err, "opening file %q", path)
|
||||
}
|
||||
succeeded := false
|
||||
defer func() {
|
||||
@@ -70,7 +70,7 @@ func openArchiveForWriting(path string) (*os.File, error) {
|
||||
}()
|
||||
fhStat, err := fh.Stat()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error statting file %q", path)
|
||||
return nil, errors.Wrapf(err, "statting file %q", path)
|
||||
}
|
||||
|
||||
if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
|
||||
|
||||
7
vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
generated
vendored
@@ -42,7 +42,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
|
||||
|
||||
c, err := newDockerClient(sys)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error initializing docker engine client")
|
||||
return nil, errors.Wrap(err, "initializing docker engine client")
|
||||
}
|
||||
|
||||
reader, writer := io.Pipe()
|
||||
@@ -84,7 +84,7 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe
|
||||
|
||||
resp, err := c.ImageLoad(ctx, reader, true)
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "Error saving image to docker engine")
|
||||
err = errors.Wrap(err, "saving image to docker engine")
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
@@ -128,6 +128,9 @@ func (d *daemonImageDestination) Reference() types.ImageReference {
|
||||
}
|
||||
|
||||
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
|
||||
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
|
||||
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
|
||||
// original manifest list digest, if desired.
|
||||
// WARNING: This does not have any transactional semantics:
|
||||
// - Uploaded data MAY be visible to others before Commit() is called
|
||||
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
|
||||
|
||||
4
vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
generated
vendored
@@ -25,13 +25,13 @@ type daemonImageSource struct {
|
||||
func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
|
||||
c, err := newDockerClient(sys)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error initializing docker engine client")
|
||||
return nil, errors.Wrap(err, "initializing docker engine client")
|
||||
}
|
||||
// Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
|
||||
// Either way ImageSave should create a tarball with exactly one image.
|
||||
inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error loading image from docker engine")
|
||||
return nil, errors.Wrap(err, "loading image from docker engine")
|
||||
}
|
||||
defer inputStream.Close()
|
||||
|
||||
|
||||
57
vendor/github.com/containers/image/v5/docker/docker_client.go
generated
vendored
@@ -92,7 +92,7 @@ type bearerToken struct {
|
||||
expirationTime time.Time
|
||||
}
|
||||
|
||||
// dockerClient is configuration for dealing with a single Docker registry.
|
||||
// dockerClient is configuration for dealing with a single container registry.
|
||||
type dockerClient struct {
|
||||
// The following members are set by newDockerClient and do not change afterwards.
|
||||
sys *types.SystemContext
|
||||
@@ -213,10 +213,9 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
|
||||
// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
|
||||
// signatureBase is always set in the return value
|
||||
func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
|
||||
registry := reference.Domain(ref.ref)
|
||||
auth, err := config.GetCredentials(sys, registry)
|
||||
auth, err := config.GetCredentialsForRef(sys, ref.ref)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error getting username and password")
|
||||
return nil, errors.Wrapf(err, "getting username and password")
|
||||
}
|
||||
|
||||
sigBase, err := SignatureStorageBaseURL(sys, ref, write)
|
||||
@@ -224,6 +223,7 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write
|
||||
return nil, err
|
||||
}
|
||||
|
||||
registry := reference.Domain(ref.ref)
|
||||
client, err := newDockerClient(sys, registry, ref.ref.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -269,7 +269,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
|
||||
skipVerify := false
|
||||
reg, err := sysregistriesv2.FindRegistry(sys, reference)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error loading registries")
|
||||
return nil, errors.Wrapf(err, "loading registries")
|
||||
}
|
||||
if reg != nil {
|
||||
if reg.Blocked {
|
||||
@@ -297,14 +297,14 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
|
||||
func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
|
||||
client, err := newDockerClient(sys, registry, registry)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating new docker client")
|
||||
return errors.Wrapf(err, "creating new docker client")
|
||||
}
|
||||
client.auth = types.DockerAuthConfig{
|
||||
Username: username,
|
||||
Password: password,
|
||||
}
|
||||
|
||||
resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth, nil)
|
||||
resp, err := client.makeRequest(ctx, http.MethodGet, "/v2/", nil, nil, v2Auth, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -343,9 +343,10 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
|
||||
v1Res := &V1Results{}
|
||||
|
||||
// Get credentials from authfile for the underlying hostname
|
||||
// We can't use GetCredentialsForRef here because we want to search the whole registry.
|
||||
auth, err := config.GetCredentials(sys, registry)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error getting username and password")
|
||||
return nil, errors.Wrapf(err, "getting username and password")
|
||||
}
|
||||
|
||||
// The /v2/_catalog endpoint has been disabled for docker.io therefore
|
||||
@@ -359,7 +360,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
|
||||
|
||||
client, err := newDockerClient(sys, hostname, registry)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error creating new docker client")
|
||||
return nil, errors.Wrapf(err, "creating new docker client")
|
||||
}
|
||||
client.auth = auth
|
||||
if sys != nil {
|
||||
@@ -379,7 +380,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
logrus.Debugf("trying to talk to v1 search endpoint")
|
||||
resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth, nil)
|
||||
resp, err := client.makeRequest(ctx, http.MethodGet, u.String(), nil, nil, noAuth, nil)
|
||||
if err != nil {
|
||||
logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
|
||||
} else {
|
||||
@@ -399,14 +400,15 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
|
||||
searchRes := []SearchResult{}
|
||||
path := "/v2/_catalog"
|
||||
for len(searchRes) < limit {
|
||||
resp, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
|
||||
resp, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
|
||||
if err != nil {
|
||||
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
|
||||
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, httpResponseToError(resp, ""))
|
||||
err := httpResponseToError(resp, "")
|
||||
logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, err)
|
||||
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
|
||||
}
|
||||
v2Res := &V2Results{}
|
||||
@@ -422,7 +424,14 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
|
||||
res := SearchResult{
|
||||
Name: repo,
|
||||
}
|
||||
searchRes = append(searchRes, res)
|
||||
// bugzilla.redhat.com/show_bug.cgi?id=1976283
|
||||
// If we have a full match, make sure it's listed as the first result.
|
||||
// (Note there might be a full match we never see if we reach the result limit first.)
|
||||
if repo == image {
|
||||
searchRes = append([]SearchResult{res}, searchRes...)
|
||||
} else {
|
||||
searchRes = append(searchRes, res)
|
||||
}
|
||||
}
|
||||
}
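The fix referenced by the bugzilla link above is purely about ordering: an exact repository match is moved to the front of the result slice instead of being appended. The prepend is the usual Go slice idiom, sketched here in isolation with hypothetical names:

package searchorder

type searchResult struct{ Name string }

// placeResult appends res normally, but puts an exact match first so callers
// that only look at the earliest entries still see the full match.
func placeResult(results []searchResult, res searchResult, query string) []searchResult {
	if res.Name == query {
		return append([]searchResult{res}, results...)
	}
	return append(results, res)
}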
|
||||
|
||||
@@ -525,11 +534,10 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
|
||||
// makeRequest should generally be preferred.
|
||||
// Note that no exponential back off is performed when receiving an http 429 status code.
|
||||
func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
|
||||
req, err := http.NewRequest(method, url, stream)
|
||||
req, err := http.NewRequestWithContext(ctx, method, url, stream)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
|
||||
req.ContentLength = streamLen
|
||||
}
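The change above is the stock migration from http.NewRequest plus req.WithContext to a single http.NewRequestWithContext call, so cancellation is attached at construction time. A minimal before/after sketch:

package httpctx

import (
	"context"
	"net/http"
)

func newGet(ctx context.Context, url string) (*http.Request, error) {
	// Before: req, err := http.NewRequest(http.MethodGet, url, nil)
	//         req = req.WithContext(ctx)
	// After, as in the diff:
	return http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
}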
|
||||
@@ -622,13 +630,11 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
|
||||
return nil, errors.Errorf("missing realm in bearer auth challenge")
|
||||
}
|
||||
|
||||
authReq, err := http.NewRequest(http.MethodPost, realm, nil)
|
||||
authReq, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
authReq = authReq.WithContext(ctx)
|
||||
|
||||
// Make the form data required against the oauth2 authentication
|
||||
// More details here: https://docs.docker.com/registry/spec/auth/oauth/
|
||||
params := authReq.URL.Query()
|
||||
@@ -672,12 +678,11 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
|
||||
return nil, errors.Errorf("missing realm in bearer auth challenge")
|
||||
}
|
||||
|
||||
authReq, err := http.NewRequest(http.MethodGet, realm, nil)
|
||||
authReq, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
authReq = authReq.WithContext(ctx)
|
||||
params := authReq.URL.Query()
|
||||
if c.auth.Username != "" {
|
||||
params.Add("account", c.auth.Username)
|
||||
@@ -731,7 +736,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
|
||||
|
||||
ping := func(scheme string) error {
|
||||
url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
|
||||
resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
|
||||
resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
|
||||
if err != nil {
|
||||
logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
|
||||
return err
|
||||
@@ -751,14 +756,14 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
|
||||
err = ping("http")
|
||||
}
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "error pinging docker registry %s", c.registry)
|
||||
err = errors.Wrapf(err, "pinging container registry %s", c.registry)
|
||||
if c.sys != nil && c.sys.DockerDisableV1Ping {
|
||||
return err
|
||||
}
|
||||
// best effort to understand if we're talking to a V1 registry
|
||||
pingV1 := func(scheme string) bool {
|
||||
url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
|
||||
resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
|
||||
resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
|
||||
if err != nil {
|
||||
logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
|
||||
return false
|
||||
@@ -792,14 +797,14 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
|
||||
// using the original data structures.
|
||||
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
|
||||
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
|
||||
res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
|
||||
res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
if res.StatusCode != http.StatusOK {
|
||||
return nil, errors.Wrapf(clientLib.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
|
||||
return nil, errors.Wrapf(clientLib.HandleErrorResponse(res), "downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
|
||||
}
|
||||
|
||||
body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize)
|
||||
@@ -809,7 +814,7 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
|
||||
|
||||
var parsedBody extensionSignatureList
|
||||
if err := json.Unmarshal(body, &parsedBody); err != nil {
|
||||
return nil, errors.Wrapf(err, "Error decoding signature list")
|
||||
return nil, errors.Wrapf(err, "decoding signature list")
|
||||
}
|
||||
return &parsedBody, nil
|
||||
}
|
||||
|
||||
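Several hunks in this file replace http.NewRequest followed by a separate req.WithContext(ctx) call with http.NewRequestWithContext, and swap string method literals such as "GET" for the http.MethodGet constants. A small self-contained sketch of the before/after idiom; the registry URL is a placeholder, not taken from the diff:

    package main

    import (
        "context"
        "net/http"
        "time"
    )

    // buildRequestOld shows the pre-change idiom: build the request, then attach
    // the context in a second step that is easy to forget.
    func buildRequestOld(ctx context.Context, url string) (*http.Request, error) {
        req, err := http.NewRequest(http.MethodGet, url, nil)
        if err != nil {
            return nil, err
        }
        return req.WithContext(ctx), nil
    }

    // buildRequestNew shows the post-change idiom: the context is bound at
    // construction time, so cancellation and deadlines always apply.
    func buildRequestNew(ctx context.Context, url string) (*http.Request, error) {
        return http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()
        if _, err := buildRequestOld(ctx, "https://registry.example.com/v2/"); err != nil {
            panic(err)
        }
        if _, err := buildRequestNew(ctx, "https://registry.example.com/v2/"); err != nil {
            panic(err)
        }
    }

Binding the context at construction time means cancellation and deadlines apply to the request without a second step that can be skipped by mistake.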
vendor/github.com/containers/image/v5/docker/docker_image.go (generated, vendored): 8 changes
@@ -68,12 +68,12 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
tags := make([]string, 0)

for {
res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
res, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
return nil, err
}
defer res.Body.Close()
if err := httpResponseToError(res, "Error fetching tags list"); err != nil {
if err := httpResponseToError(res, "fetching tags list"); err != nil {
return nil, err
}

@@ -134,14 +134,14 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
"Accept": manifest.DefaultRequestedManifestMIMETypes,
}

res, err := client.makeRequest(ctx, "HEAD", path, headers, nil, v2Auth, nil)
res, err := client.makeRequest(ctx, http.MethodHead, path, headers, nil, v2Auth, nil)
if err != nil {
return "", err
}

defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return "", errors.Wrapf(registryHTTPResponseToError(res), "Error reading digest %s in %s", tagOrDigest, dr.ref.Name())
return "", errors.Wrapf(registryHTTPResponseToError(res), "reading digest %s in %s", tagOrDigest, dr.ref.Name())
}

dig, err := digest.Parse(res.Header.Get("Docker-Content-Digest"))
vendor/github.com/containers/image/v5/docker/docker_image_dest.go (generated, vendored): 43 changes
@@ -147,18 +147,18 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// FIXME? Chunked upload, progress reporting, etc.
uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
logrus.Debugf("Uploading %s", uploadPath)
res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth, nil)
res, err := d.c.makeRequest(ctx, http.MethodPost, uploadPath, nil, nil, v2Auth, nil)
if err != nil {
return types.BlobInfo{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusAccepted {
logrus.Debugf("Error initiating layer upload, response %#v", *res)
return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry)
return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "initiating layer upload to %s in %s", uploadPath, d.c.registry)
}
uploadLocation, err := res.Location()
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
return types.BlobInfo{}, errors.Wrap(err, "determining upload URL")
}

digester := digest.Canonical.Digester()

@@ -168,18 +168,18 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// This error text should never be user-visible, we terminate only after makeRequestToResolvedURL
// returns, so there isn’t a way for the error text to be provided to any of our callers.
defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload"))
res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil)
res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil)
if err != nil {
logrus.Debugf("Error uploading layer chunked %v", err)
return nil, err
}
defer res.Body.Close()
if !successStatus(res.StatusCode) {
return nil, errors.Wrapf(registryHTTPResponseToError(res), "Error uploading layer chunked")
return nil, errors.Wrapf(registryHTTPResponseToError(res), "uploading layer chunked")
}
uploadLocation, err := res.Location()
if err != nil {
return nil, errors.Wrap(err, "Error determining upload URL")
return nil, errors.Wrap(err, "determining upload URL")
}
return uploadLocation, nil
}()

@@ -194,14 +194,14 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
locationQuery.Set("digest", computedDigest.String())
uploadLocation.RawQuery = locationQuery.Encode()
res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
if err != nil {
return types.BlobInfo{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
logrus.Debugf("Error uploading layer, response %#v", *res)
return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "Error uploading layer to %s", uploadLocation)
return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "uploading layer to %s", uploadLocation)
}

logrus.Debugf("Upload of layer %s complete", computedDigest)

@@ -215,7 +215,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
logrus.Debugf("Checking %s", checkPath)
res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth, extraScope)
res, err := d.c.makeRequest(ctx, http.MethodHead, checkPath, nil, nil, v2Auth, extraScope)
if err != nil {
return false, -1, err
}

@@ -226,7 +226,7 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
return false, -1, errors.Wrapf(registryHTTPResponseToError(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
return false, -1, errors.Wrapf(registryHTTPResponseToError(res), "checking whether a blob %s exists in %s", digest, repo.Name())
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil

@@ -246,7 +246,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
}
mountPath := u.String()
logrus.Debugf("Trying to mount %s", mountPath)
res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth, extraScope)
res, err := d.c.makeRequest(ctx, http.MethodPost, mountPath, nil, nil, v2Auth, extraScope)
if err != nil {
return err
}

@@ -261,10 +261,10 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
// NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
uploadLocation, err := res.Location()
if err != nil {
return errors.Wrap(err, "Error determining upload URL after a mount attempt")
return errors.Wrap(err, "determining upload URL after a mount attempt")
}
logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
if err != nil {
logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
} else {

@@ -277,7 +277,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
default:
logrus.Debugf("Error mounting, response %#v", *res)
return errors.Wrapf(registryHTTPResponseToError(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
return errors.Wrapf(registryHTTPResponseToError(res), "mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
}
}

@@ -392,7 +392,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
// Double-check that the manifest we've been given matches the digest we've been given.
matches, err := manifest.MatchesDigest(m, *instanceDigest)
if err != nil {
return errors.Wrapf(err, "error digesting manifest in PutManifest")
return errors.Wrapf(err, "digesting manifest in PutManifest")
}
if !matches {
manifestDigest, merr := manifest.Digest(m)

@@ -424,13 +424,13 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
if mimeType != "" {
headers["Content-Type"] = []string{mimeType}
}
res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth, nil)
res, err := d.c.makeRequest(ctx, http.MethodPut, path, headers, bytes.NewReader(m), v2Auth, nil)
if err != nil {
return err
}
defer res.Body.Close()
if !successStatus(res.StatusCode) {
err = errors.Wrapf(registryHTTPResponseToError(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
err = errors.Wrapf(registryHTTPResponseToError(res), "uploading manifest %s to %s", refTail, d.ref.ref.Name())
if isManifestInvalidError(errors.Cause(err)) {
err = types.ManifestTypeRejectedError{Err: err}
}

@@ -621,7 +621,7 @@ sigExists:
randBytes := make([]byte, 16)
n, err := rand.Read(randBytes)
if err != nil || n != 16 {
return errors.Wrapf(err, "Error generating random signature len %d", n)
return errors.Wrapf(err, "generating random signature len %d", n)
}
signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes)
if _, ok := existingSigNames[signatureName]; !ok {

@@ -640,7 +640,7 @@ sigExists:
}

path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String())
res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth, nil)
res, err := d.c.makeRequest(ctx, http.MethodPut, path, nil, bytes.NewReader(body), v2Auth, nil)
if err != nil {
return err
}

@@ -651,7 +651,7 @@ sigExists:
logrus.Debugf("Error body %s", string(body))
}
logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
return errors.Wrapf(registryHTTPResponseToError(res), "Error uploading signature to %s in %s", path, d.c.registry)
return errors.Wrapf(registryHTTPResponseToError(res), "uploading signature to %s in %s", path, d.c.registry)
}
}

@@ -659,6 +659,9 @@ sigExists:
}

// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
// original manifest list digest, if desired.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
vendor/github.com/containers/image/v5/docker/docker_image_src.go (generated, vendored): 213 changes
@@ -6,14 +6,17 @@ import (
"io"
"io/ioutil"
"mime"
"mime/multipart"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"sync"

"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
internalTypes "github.com/containers/image/v5/internal/types"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"

@@ -36,7 +39,7 @@ type dockerImageSource struct {
func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
if err != nil {
return nil, errors.Wrapf(err, "error loading registries configuration")
return nil, errors.Wrapf(err, "loading registries configuration")
}
if registry == nil {
// No configuration was found for the provided reference, so use the

@@ -69,7 +72,6 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
} else {
logrus.Debugf("Trying to access %q", pullSource.Reference)
}
logrus.Debugf("Trying to access %q", pullSource.Reference)
s, err := newImageSourceAttempt(ctx, sys, ref, pullSource)
if err == nil {
return s, nil

@@ -190,14 +192,14 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
headers := map[string][]string{
"Accept": manifest.DefaultRequestedManifestMIMETypes,
}
res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil)
res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
if err != nil {
return nil, "", err
}
logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type"))
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, "", errors.Wrapf(registryHTTPResponseToError(res), "Error reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
return nil, "", errors.Wrapf(registryHTTPResponseToError(res), "reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
}

manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)

@@ -246,7 +248,7 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
// NOTE: we must not authenticate on additional URLs as those
// can be abused to leak credentials or tokens. Please
// refer to CVE-2020-15157 for more information.
resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
if err == nil {
if resp.StatusCode != http.StatusOK {
err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode))

@@ -276,6 +278,82 @@ func (s *dockerImageSource) HasThreadSafeGetBlob() bool {
return true
}

// GetBlobAt returns a stream for the specified blob.
func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
headers := make(map[string][]string)

var rangeVals []string
for _, c := range chunks {
rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
}

headers["Range"] = []string{fmt.Sprintf("bytes=%s", strings.Join(rangeVals, ","))}

if len(info.URLs) != 0 {
return nil, nil, fmt.Errorf("external URLs not supported with GetBlobAt")
}

path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
logrus.Debugf("Downloading %s", path)
res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
if err != nil {
return nil, nil, err
}
if err := httpResponseToError(res, "Error fetching partial blob"); err != nil {
if res.Body != nil {
res.Body.Close()
}
return nil, nil, err
}
if res.StatusCode != http.StatusPartialContent {
res.Body.Close()
return nil, nil, errors.Errorf("invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
}

mediaType, params, err := mime.ParseMediaType(res.Header.Get("Content-Type"))
if err != nil {
return nil, nil, err
}

streams := make(chan io.ReadCloser)
errs := make(chan error)

go func() {
defer close(streams)
defer close(errs)
if !strings.HasPrefix(mediaType, "multipart/") {
streams <- res.Body
return
}
boundary, found := params["boundary"]
if !found {
errs <- errors.Errorf("could not find boundary")
return
}
buffered := makeBufferedNetworkReader(res.Body, 64, 16384)
defer buffered.Close()
mr := multipart.NewReader(buffered, boundary)
for {
p, err := mr.NextPart()
if err != nil {
if err != io.EOF {
errs <- err
}
return
}
s := signalCloseReader{
Closed: make(chan interface{}),
Stream: p,
}
streams <- s
// NextPart() cannot be called while the current part
// is being read, so wait until it is closed
<-s.Closed
}
}()
return streams, errs, nil
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
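The new GetBlobAt above encodes the requested chunks into a single HTTP Range header and hands the caller one channel of readers and one channel of errors. The sketch below shows the header construction and one plausible way a caller might drain the channels; the chunk type and the drain helper are illustrative only, not part of the vendored API:

    package main

    import (
        "fmt"
        "io"
        "os"
        "strings"
    )

    // chunk mirrors the {Offset, Length} pairs that GetBlobAt folds into the
    // Range header; the type name is invented for this sketch.
    type chunk struct {
        Offset uint64
        Length uint64
    }

    // rangeHeader reproduces the header construction from the hunk:
    // "bytes=start-end,start-end,..." with inclusive end offsets.
    func rangeHeader(chunks []chunk) string {
        vals := make([]string, 0, len(chunks))
        for _, c := range chunks {
            vals = append(vals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
        }
        return "bytes=" + strings.Join(vals, ",")
    }

    // drain consumes a (streams, errs) pair: read each stream fully, close it,
    // and only then wait for the next stream or for an error.
    func drain(streams chan io.ReadCloser, errs chan error) error {
        for {
            select {
            case stream, ok := <-streams:
                if !ok {
                    return nil
                }
                _, err := io.Copy(os.Stdout, stream)
                stream.Close()
                if err != nil {
                    return err
                }
            case err, ok := <-errs:
                if !ok {
                    return nil
                }
                return err
            }
        }
    }

    func main() {
        fmt.Println(rangeHeader([]chunk{{Offset: 0, Length: 1024}, {Offset: 4096, Length: 512}}))
        // Output: bytes=0-1023,4096-4607

        // Stub channels stand in for the pair GetBlobAt would return.
        streams := make(chan io.ReadCloser, 1)
        errs := make(chan error) // left open and empty for this stub
        streams <- io.NopCloser(strings.NewReader("chunk payload\n"))
        close(streams)
        if err := drain(streams, errs); err != nil {
            panic(err)
        }
    }

Closing each stream before taking the next one matters because the goroutine in GetBlobAt waits on signalCloseReader.Closed before calling NextPart() again.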
@@ -286,7 +364,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca

path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
logrus.Debugf("Downloading %s", path)
res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
res, err := s.c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
return nil, 0, err
}

@@ -376,11 +454,10 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (

case "http", "https":
logrus.Debugf("GET %s", url)
req, err := http.NewRequest("GET", url.String(), nil)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil)
if err != nil {
return nil, false, err
}
req = req.WithContext(ctx)
res, err := s.c.client.Do(req)
if err != nil {
return nil, false, err

@@ -445,7 +522,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
return err
}
getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth, nil)
get, err := c.makeRequest(ctx, http.MethodGet, getPath, headers, nil, v2Auth, nil)
if err != nil {
return err
}

@@ -467,7 +544,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere

// When retrieving the digest from a registry >= 2.3 use the following header:
// "Accept": "application/vnd.docker.distribution.manifest.v2+json"
delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth, nil)
delete, err := c.makeRequest(ctx, http.MethodDelete, deletePath, headers, nil, v2Auth, nil)
if err != nil {
return err
}

@@ -499,3 +576,119 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere

return nil
}

type bufferedNetworkReaderBuffer struct {
data []byte
len int
consumed int
err error
}

type bufferedNetworkReader struct {
stream io.Reader
emptyBuffer chan *bufferedNetworkReaderBuffer
readyBuffer chan *bufferedNetworkReaderBuffer
terminate chan bool
current *bufferedNetworkReaderBuffer
mutex sync.Mutex
gotEOF bool
}

// handleBufferedNetworkReader runs in a goroutine
func handleBufferedNetworkReader(br *bufferedNetworkReader) {
defer close(br.readyBuffer)
for {
select {
case b := <-br.emptyBuffer:
b.len, b.err = br.stream.Read(b.data)
br.readyBuffer <- b
if b.err != nil {
return
}
case <-br.terminate:
return
}
}
}

func (n *bufferedNetworkReader) Close() {
close(n.terminate)
close(n.emptyBuffer)
}

func (n *bufferedNetworkReader) read(p []byte) (int, error) {
if n.current != nil {
copied := copy(p, n.current.data[n.current.consumed:n.current.len])
n.current.consumed += copied
if n.current.consumed == n.current.len {
n.emptyBuffer <- n.current
n.current = nil
}
if copied > 0 {
return copied, nil
}
}
if n.gotEOF {
return 0, io.EOF
}

var b *bufferedNetworkReaderBuffer

select {
case b = <-n.readyBuffer:
if b.err != nil {
if b.err != io.EOF {
return b.len, b.err
}
n.gotEOF = true
}
b.consumed = 0
n.current = b
return n.read(p)
case <-n.terminate:
return 0, io.EOF
}
}

func (n *bufferedNetworkReader) Read(p []byte) (int, error) {
n.mutex.Lock()
defer n.mutex.Unlock()

return n.read(p)
}

func makeBufferedNetworkReader(stream io.Reader, nBuffers, bufferSize uint) *bufferedNetworkReader {
br := bufferedNetworkReader{
stream: stream,
emptyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers),
readyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers),
terminate: make(chan bool),
}

go func() {
handleBufferedNetworkReader(&br)
}()

for i := uint(0); i < nBuffers; i++ {
b := bufferedNetworkReaderBuffer{
data: make([]byte, bufferSize),
}
br.emptyBuffer <- &b
}

return &br
}

type signalCloseReader struct {
Closed chan interface{}
Stream io.ReadCloser
}

func (s signalCloseReader) Read(p []byte) (int, error) {
return s.Stream.Read(p)
}

func (s signalCloseReader) Close() error {
defer close(s.Closed)
return s.Stream.Close()
}
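When the registry answers a multi-range request with a multipart payload, the goroutine in GetBlobAt splits it using mime.ParseMediaType and multipart.NewReader. A standalone sketch of that parsing step over a synthetic multipart/byteranges body; the boundary and the part contents are invented for the demo:

    package main

    import (
        "fmt"
        "io"
        "mime"
        "mime/multipart"
        "strings"
    )

    func main() {
        const contentType = `multipart/byteranges; boundary=SEP`
        body := strings.NewReader(
            "--SEP\r\nContent-Range: bytes 0-4/20\r\n\r\nhello\r\n" +
                "--SEP\r\nContent-Range: bytes 10-14/20\r\n\r\nworld\r\n" +
                "--SEP--\r\n")

        mediaType, params, err := mime.ParseMediaType(contentType)
        if err != nil || !strings.HasPrefix(mediaType, "multipart/") {
            panic("not a multipart response")
        }
        mr := multipart.NewReader(body, params["boundary"])
        for {
            part, err := mr.NextPart()
            if err == io.EOF {
                break
            }
            if err != nil {
                panic(err)
            }
            data, _ := io.ReadAll(part)
            fmt.Printf("%s: %q\n", part.Header.Get("Content-Range"), data)
        }
    }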
vendor/github.com/containers/image/v5/docker/docker_transport.go (generated, vendored): 2 changes
@@ -16,7 +16,7 @@ func init() {
transports.Register(Transport)
}

// Transport is an ImageTransport for Docker registry-hosted images.
// Transport is an ImageTransport for container registry-hosted images.
var Transport = dockerTransport{}

type dockerTransport struct{}
Some files were not shown because too many files have changed in this diff.