Mirror of https://github.com/containers/skopeo.git, synced 2026-01-30 22:08:44 +00:00

Compare commits: v1.16.0 ... release-1. (144 commits)
Commits in this range (author and date columns were not captured; SHA1 only):

fb0302daea f4c9af64cb 45c0a85e35 bfd0850f06 582ee15bbb 89cbcbf9c6 64361bde06 955233070b
bd1ac4668f 2b3701b7be 59ec554738 83d0e76b83 04d65888a3 958d586c45 137a912c55 dbf619c027
52895bc6cd d18677e479 b78a415987 0eb4ad2f54 5eba061489 9764c99dbd 54d235403a a81cb65fac
7d6169d219 85fa4dff42 8380f284c7 ed0efc6932 3315da48b5 ab53f64473 c737e5daea 653db36664
0633de6350 6483de4894 3d2c80f58f b5a13bccfd eae9e8862c 392aa3f70f b68afb1a73 0eb0ac3707
2a47dff7e1 631555fc83 83efeea4f3 da5c8d6e7b deda96636b 17ea741413 fb777d3906 bf5987896a
568d5d1c6f f333a897f6 1866ecbda2 f7801f77cc b116c5bdce e288827449 27baed919d 4863e05f5f
a71a8b4c48 4541d649a6 96f3804385 be26f2eb2f e9755957df 74488c4b86 2a3c8ee5b1 87f199fbaf
f423f01d1b c5eaf49918 186e9b4f0b 8896960688 f818827f6d 0c25d2c9fb bae8ccd7fb 181429435e
293ac065b7 e354a1431a 34f0644177 0a73f20a51 fa1762f52b 15f69ac611 33a629dd7a 78d7940b94
bc57843bdf 8d3fb4b390 5e0eeadd93 c0cc7ed28a 3161b9faf8 602c121f51 4da797e353 7fbdd71411
3f66d002f2 5fece3398f 43d066e0ce 194619b0f8 142f20f78e 10a9e24d0e 6ad77a1bc4 03ca12ed56
9b96038384 bda9935e0c 01ad0ed01c 61ec80a222 60b5f0e814 f240ce07ac f291e8868b ee6181427f
cb58f402f4 0fd65fb746 e39efb10e1 7da6ea07ca 9ceee81a48 6d2672c924 dcf8a16c6a b113a2de35
4c9ec00ae4 9166a97bbb 795705e41c bebbbaee46 2f45e4888a 6b13950b11 f02a396ce5 115a6de9a6
a4c46d491f a817006786 dd5ce5d93e 229c9fadc1 0d16d65dc5 6e0b7a126e b53eaf47f3 b9620327ab
502f0cae62 3151e088bf 02917be5f6 d38276137e f64a376157 df598d7f5b d74fae1766 17da582650
d2357b38fa 38e29cdfb6 5af7b5dba1 84558d451d bbaa4b97d4 39dccc2aae ebf2eac05a d17c809348
.cirrus.yml (deleted; 259 lines removed)
@@ -1,259 +0,0 @@
---

# Main collection of env. vars to set for all tasks and scripts.
env:
    ####
    #### Global variables used for all tasks
    ####
    # Name of the ultimate destination branch for this CI run, PR or post-merge.
    DEST_BRANCH: "main"
    # Overrides default location (/tmp/cirrus) for repo clone
    GOPATH: &gopath "/var/tmp/go"
    GOBIN: "${GOPATH}/bin"
    GOCACHE: "${GOPATH}/cache"
    GOSRC: &gosrc "/var/tmp/go/src/github.com/containers/skopeo"
    # Required for consistency with containers/image CI
    SKOPEO_PATH: *gosrc
    CIRRUS_WORKING_DIR: *gosrc
    # The default is 'sh' if unspecified
    CIRRUS_SHELL: "/bin/bash"
    # Save a little typing (path relative to $CIRRUS_WORKING_DIR)
    SCRIPT_BASE: "./contrib/cirrus"

    # Google-cloud VM Images
    IMAGE_SUFFIX: "c20240529t141726z-f40f39d13"
    FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"

    # Container FQIN's
    FEDORA_CONTAINER_FQIN: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}"

    # Built along with the standard PR-based workflow in c/automation_images
    SKOPEO_CIDEV_CONTAINER_FQIN: "quay.io/libpod/skopeo_cidev:${IMAGE_SUFFIX}"

# Default timeout for each task
timeout_in: 45m

gcp_credentials: ENCRYPTED[52d9e807b531b37ab14e958cb5a72499460663f04c8d73e22ad608c027a31118420f1c80f0be0882fbdf96f49d8f9ac0]

validate_task:
    # The git-validation tool doesn't work well on branch or tag push,
    # under Cirrus-CI, due to challenges obtaining the starting commit ID.
    # Only do validation for PRs.
    only_if: &is_pr $CIRRUS_PR != ''
    container:
        image: '${SKOPEO_CIDEV_CONTAINER_FQIN}'
        cpu: 4
        memory: 8
    setup_script: |
        make tools
    test_script: |
        make validate-local
        make vendor && hack/tree_status.sh

doccheck_task:
    only_if: *is_pr
    depends_on:
        - validate
    container:
        image: "${FEDORA_CONTAINER_FQIN}"
        cpu: 4
        memory: 8
    env:
        BUILDTAGS: &withopengpg 'btrfs_noversion libdm_no_deferred_remove containers_image_openpgp'
    script: |
        # TODO: Can't use 'runner.sh setup' inside container. However,
        # removing the pre-installed package is the only necessary step
        # at the time of this comment.
        dnf erase -y skopeo # Guarantee non-interference
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" build
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" doccheck

osx_task:
    # Don't run for docs-only builds.
    # Also don't run on release-branches or their PRs,
    # since base container-image is not version-constrained.
    only_if: &not_docs_or_release_branch >-
        ($CIRRUS_BASE_BRANCH == $CIRRUS_DEFAULT_BRANCH ||
         $CIRRUS_BRANCH == $CIRRUS_DEFAULT_BRANCH ) &&
        $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
    depends_on:
        - validate
    persistent_worker: &mac_pw
        labels:
            os: darwin
            arch: arm64
            purpose: prod
    env:
        CIRRUS_WORKING_DIR: "$HOME/ci/task-${CIRRUS_TASK_ID}"
        # Prevent cache-pollution fron one task to the next.
        GOPATH: "$CIRRUS_WORKING_DIR/.go"
        GOCACHE: "$CIRRUS_WORKING_DIR/.go/cache"
        GOENV: "$CIRRUS_WORKING_DIR/.go/support"
        GOSRC: "$HOME/ci/task-${CIRRUS_TASK_ID}"
        TMPDIR: "/private/tmp/ci"
    # This host is/was shared with potentially many other CI tasks.
    # The previous task may have been canceled or aborted.
    prep_script: &mac_cleanup "contrib/cirrus/mac_cleanup.sh"
    test_script:
        - export PATH=$GOPATH/bin:$PATH
        - go version
        - go env
        - make tools
        - make validate-local test-unit-local bin/skopeo
        - bin/skopeo -v
    # This host is/was shared with potentially many other CI tasks.
    # Ensure nothing is left running while waiting for the next task.
    always:
        task_cleanup_script: *mac_cleanup

cross_task:
    alias: cross
    only_if: >-
        $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
    depends_on:
        - validate
    gce_instance: &standardvm
        image_project: libpod-218412
        zone: "us-central1-f"
        cpu: 2
        memory: "4Gb"
        # Required to be 200gig, do not modify - has i/o performance impact
        # according to gcloud CLI tool warning messages.
        disk: 200
        image_name: ${FEDORA_CACHE_IMAGE_NAME}
    env:
        BUILDTAGS: *withopengpg
    setup_script: >-
        "${GOSRC}/${SCRIPT_BASE}/runner.sh" setup
    cross_script: >-
        "${GOSRC}/${SCRIPT_BASE}/runner.sh" cross

ostree-rs-ext_task:
    alias: proxy_ostree_ext
    only_if: *not_docs_or_release_branch
    # WARNING: This task potentially performs a container image
    # build (on change) with runtime package installs. Therefore,
    # its behavior can be unpredictable and potentially flake-prone.
    # In case of emergency, uncomment the next statement to bypass.
    #
    # skip: $CI == "true"
    #
    depends_on:
        - validate
    # Ref: https://cirrus-ci.org/guide/docker-builder-vm/#dockerfile-as-a-ci-environment
    container:
        # The runtime image will be rebuilt on change
        dockerfile: contrib/cirrus/ostree_ext.dockerfile
        docker_arguments: # required build-args
            BASE_FQIN: quay.io/coreos-assembler/fcos-buildroot:testing-devel
            CIRRUS_IMAGE_VERSION: 2
    env:
        EXT_REPO_NAME: ostree-rs-ext
        EXT_REPO_HOME: $CIRRUS_WORKING_DIR/../$EXT_REPO_NAME
        EXT_REPO: https://github.com/ostreedev/${EXT_REPO_NAME}.git
    skopeo_build_script:
        - dnf builddep -y skopeo
        - make
        - make install
    proxy_ostree_ext_build_script:
        - git clone --depth 1 $EXT_REPO $EXT_REPO_HOME
        - cd $EXT_REPO_HOME
        - cargo test --no-run
    proxy_ostree_ext_test_script:
        - cd $EXT_REPO_HOME
        - cargo test -- --nocapture --quiet

#####
##### NOTE: This task is subtantially duplicated in the containers/image
##### repository's `.cirrus.yml`. Changes made here should be fully merged
##### prior to being manually duplicated and maintained in containers/image.
#####
test_skopeo_task:
    alias: test_skopeo
    # Don't test for [CI:DOCS], [CI:BUILD].
    only_if: >-
        $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' &&
        $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
    depends_on:
        - validate
    gce_instance:
        image_project: libpod-218412
        zone: "us-central1-f"
        cpu: 2
        memory: "4Gb"
        # Required to be 200gig, do not modify - has i/o performance impact
        # according to gcloud CLI tool warning messages.
        disk: 200
        image_name: ${FEDORA_CACHE_IMAGE_NAME}
    matrix:
        - name: "Skopeo Test" # N/B: Name ref. by hack/get_fqin.sh
          env:
              BUILDTAGS: 'btrfs_noversion libdm_no_deferred_remove'
        - name: "Skopeo Test w/ opengpg"
          env:
              BUILDTAGS: *withopengpg
    setup_script: >-
        "${GOSRC}/${SCRIPT_BASE}/runner.sh" setup
    vendor_script: >-
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" vendor
    build_script: >-
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" build
    unit_script: >-
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" unit
    integration_script: >-
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" integration
    system_script: >
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" system

# This task is critical. It updates the "last-used by" timestamp stored
# in metadata for all VM images. This mechanism functions in tandem with
# an out-of-band pruning operation to remove disused VM images.
meta_task:
    name: "VM img. keepalive"
    alias: meta
    container: &smallcontainer
        cpu: 2
        memory: 2
        image: quay.io/libpod/imgts:latest
    env:
        # Space-separated list of images used by this repository state
        IMGNAMES: |
            ${FEDORA_CACHE_IMAGE_NAME}
            build-push-${IMAGE_SUFFIX}
        BUILDID: "${CIRRUS_BUILD_ID}"
        REPOREF: "${CIRRUS_REPO_NAME}"
        GCPJSON: ENCRYPTED[6867b5a83e960e7c159a98fe6c8360064567a071c6f4b5e7d532283ecd870aa65c94ccd74bdaa9bf7aadac9d42e20a67]
        GCPNAME: ENCRYPTED[1cf558ae125e3c39ec401e443ad76452b25d790c45eb73d77c83eb059a0f7fd5085ef7e2f7e410b04ea6e83b0aab2eb1]
        GCPPROJECT: libpod-218412
    clone_script: &noop mkdir -p "$CIRRUS_WORKING_DIR"
    script: /usr/local/bin/entrypoint.sh

# Status aggregator for all tests. This task simply ensures a defined
# set of tasks all passed, and allows confirming that based on the status
# of this task.
success_task:
    name: "Total Success"
    alias: success
    # N/B: ALL tasks must be listed here, minus their '_task' suffix.
    depends_on:
        - validate
        - doccheck
        - osx
        - cross
        - proxy_ostree_ext
        - test_skopeo
        - meta
    container: *smallcontainer
    env:
        CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
        TEST_ENVIRON: container
    clone_script: *noop
    script: /bin/true
.fmf/version (new file, 1 line)
@@ -0,0 +1 @@
1
.packit.yaml (118 lines changed)
@@ -2,98 +2,64 @@
# See the documentation for more information:
# https://packit.dev/docs/configuration/

# NOTE: The Packit copr_build tasks help to check if every commit builds on
# supported Fedora and CentOS Stream arches.
# They do not block the current Cirrus-based workflow.

downstream_package_name: skopeo
upstream_tag_template: v{version}

packages:
  skopeo-fedora:
    pkg_tool: fedpkg
    specfile_path: rpm/skopeo.spec
  skopeo-centos:
    pkg_tool: centpkg
    specfile_path: rpm/skopeo.spec
  skopeo-rhel:
    specfile_path: rpm/skopeo.spec
    specfile_path: skopeo.spec

srpm_build_deps:
  - make
# Disable automatic merging for Copr builds (and subsequent Testing Farm)
merge_pr_in_ci: false

jobs:
  - job: copr_build
    trigger: pull_request
    packages: [skopeo-fedora]
    notifications: &copr_build_failure_notification
    notifications: &packit_failure_notification
      failure_comment:
        message: "Ephemeral COPR build failed. @containers/packit-build please check."
    targets:
      fedora-all-x86_64: {}
      fedora-all-aarch64: {}
      fedora-eln-x86_64:
        additional_repos:
          - "https://kojipkgs.fedoraproject.org/repos/eln-build/latest/x86_64/"
      fedora-eln-aarch64:
        additional_repos:
          - "https://kojipkgs.fedoraproject.org/repos/eln-build/latest/aarch64/"
        message: "Packit jobs failed. @containers/packit-build please check."
    enable_net: true
    targets:
      - epel-10-x86_64
      - epel-10-aarch64
    actions:
      post-upstream-clone: "curl --fail -O https://gitlab.com/redhat/centos-stream/rpms/skopeo/-/raw/c10s/skopeo.spec"

  - job: copr_build
    trigger: pull_request
    packages: [skopeo-centos]
    notifications: *copr_build_failure_notification
    targets:
      - centos-stream-9-x86_64
      - centos-stream-9-aarch64
      - centos-stream-10-x86_64
      - centos-stream-10-aarch64
    notifications: *packit_failure_notification
    enable_net: true

  - job: copr_build
    trigger: pull_request
    packages: [skopeo-rhel]
    notifications: *copr_build_failure_notification
    targets:
      - epel-9-x86_64
      - epel-9-aarch64
    enable_net: true
    actions:
      post-upstream-clone: "curl --fail -O https://gitlab.com/redhat/centos-stream/rpms/skopeo/-/raw/c9s/skopeo.spec"

  # Run on commit to main branch
  - job: copr_build
    trigger: commit
    packages: [skopeo-fedora]
    notifications:
      failure_comment:
        message: "podman-next COPR build failed. @containers/packit-build please check."
    branch: main
    owner: rhcontainerbot
    project: podman-next
    enable_net: true
  - job: tests
    trigger: pull_request
    use_internal_tf: true
    notifications: *packit_failure_notification
    targets:
      epel-10-x86_64:
        distros: [RHEL-10-Nightly,RHEL-10.0-Nightly]
      epel-10-aarch64:
        distros: [RHEL-10-Nightly,RHEL-10.0-Nightly]
      epel-9-x86_64:
        distros: [RHEL-9-Nightly,RHEL-9.6.0-Nightly]
      epel-9-aarch64:
        distros: [RHEL-9-Nightly,RHEL-9.6.0-Nightly]
    tmt_plan: "/plans/system"
    identifier: "rpm"

  # Sync to Fedora
  - job: propose_downstream
    trigger: release
    packages: [skopeo-fedora]
    update_release: false
    dist_git_branches:
      - fedora-all

  # Sync to CentOS Stream
  - job: propose_downstream
    trigger: release
    packages: [skopeo-centos]
    update_release: false
    dist_git_branches:
      - c10s

  - job: koji_build
    trigger: commit
    dist_git_branches:
      - fedora-all

  - job: bodhi_update
    trigger: commit
    dist_git_branches:
      - fedora-branched # rawhide updates are created automatically
  - job: tests
    trigger: pull_request
    use_internal_tf: true
    notifications: *packit_failure_notification
    skip_build: true
    targets:
      # RHEL-N-Nightly can often have newer toolchain packages, breaking
      # vendoring. So, only test on N.Y here
      epel-10-x86_64:
        distros: [RHEL-10.0-Nightly]
      epel-9-x86_64:
        distros: [RHEL-9.6.0-Nightly]
    tmt_plan: "/plans/no-rpm"
    identifier: "no-rpm"
@@ -1,3 +1,3 @@
## The skopeo Project Community Code of Conduct

The skopeo project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
The skopeo project, as part of Podman Container Tools, follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
Makefile (6 lines changed)
@@ -27,7 +27,7 @@ GOARCH ?= $(shell go env GOARCH)
# N/B: This value is managed by Renovate, manual changes are
# possible, as long as they don't disturb the formatting
# (i.e. DO NOT ADD A 'v' prefix!)
GOLANGCI_LINT_VERSION := 1.59.1
GOLANGCI_LINT_VERSION := 1.63.4

ifeq ($(GOBIN),)
GOBIN := $(GOPATH)/bin
@@ -70,7 +70,9 @@ export SKOPEO_CONTAINER_TESTS ?= $(if $(CI),1,0)
# This is a compromise, we either use a container for this or require
# the local user to have a compatible python3 development environment.
# Define it as a "resolve on use" variable to avoid calling out when possible
SKOPEO_CIDEV_CONTAINER_FQIN ?= $(shell hack/get_fqin.sh)
#SKOPEO_CIDEV_CONTAINER_FQIN ?= $(shell hack/get_fqin.sh)
# FIXME: hack/get_fqin.sh depends on cirrus.yml so we hardcode SKOPEO_CIDEV_CONTAINER_FQIN here
SKOPEO_CIDEV_CONTAINER_FQIN ?= "quay.io/libpod/skopeo_cidev:c20250131t121915z-f41f40d13"
CONTAINER_CMD ?= ${CONTAINER_RUNTIME} run --rm -i -e TESTFLAGS="$(TESTFLAGS)" -e CI=$(CI) -e SKOPEO_CONTAINER_TESTS=1
# if this session isn't interactive, then we don't want to allocate a
# TTY, which would fail, but if it is interactive, we do want to attach
cmd/skopeo/copy_test.go (new file, 18 lines)
@@ -0,0 +1,18 @@
package main

import "testing"

func TestCopy(t *testing.T) {
    // Invalid command-line arguments
    for _, args := range [][]string{
        {},
        {"a1"},
        {"a1", "a2", "a3"},
    } {
        out, err := runSkopeo(append([]string{"--insecure-policy", "copy"}, args...)...)
        assertTestFailed(t, out, err, "Exactly two arguments expected")
    }

    // FIXME: Much more test coverage
    // Actual feature tests exist in integration and systemtest
}
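The new test leans on two package-level helpers, runSkopeo and assertTestFailed, that live elsewhere in cmd/skopeo and are not part of this diff. A minimal sketch of what such helpers could look like, purely for orientation (hypothetical shape, not the repository's actual implementation; createApp is assumed to return the cobra root command):

```go
package main

import (
    "bytes"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

// runSkopeoSketch builds the root command, captures stdout, and executes it with args.
func runSkopeoSketch(args ...string) (string, error) {
    app, _ := createApp() // assumption: a constructor for the cobra root command exists in the package
    stdout := bytes.Buffer{}
    app.SetOut(&stdout)
    app.SetArgs(args)
    err := app.Execute()
    return stdout.String(), err
}

// assertTestFailedSketch checks that the command failed with a message containing
// substring and produced no regular output.
func assertTestFailedSketch(t *testing.T, stdout string, err error, substring string) {
    require.Error(t, err)
    assert.Contains(t, err.Error(), substring)
    assert.Empty(t, stdout)
}
```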
@@ -54,3 +54,17 @@ func TestDockerRepositoryReferenceParserDrift(t *testing.T) {
        }
    }
}

func TestListTags(t *testing.T) {
    // Invalid command-line arguments
    for _, args := range [][]string{
        {},
        {"a1", "a2"},
    } {
        out, err := runSkopeo(append([]string{"list-tags"}, args...)...)
        assertTestFailed(t, out, err, "Exactly one non-option argument expected")
    }

    // FIXME: Much more test coverage
    // Actual feature tests exist in systemtest
}
@@ -687,6 +687,7 @@ func (h *proxyHandler) FinishPipe(args []any) (replyBuf, error) {

    // Wait for the goroutine to complete
    f.wg.Wait()
    logrus.Debug("Completed pipe goroutine")
    // And only now do we close the write half; this forces the client to call this API
    f.w.Close()
    // Propagate any errors from the goroutine worker
@@ -708,6 +709,7 @@ func (h *proxyHandler) close() {

// send writes a reply buffer to the socket
func (buf replyBuf) send(conn *net.UnixConn, err error) error {
    logrus.Debugf("Sending reply: err=%v value=%v pipeid=%v", err, buf.value, buf.pipeid)
    replyToSerialize := reply{
        Success: err == nil,
        Value: buf.value,
@@ -782,6 +784,8 @@ func (h *proxyHandler) processRequest(readBytes []byte) (rb replyBuf, terminate
        err = fmt.Errorf("invalid request: %v", err)
        return
    }
    logrus.Debugf("Executing method %s", req.Method)

    // Dispatch on the method
    switch req.Method {
    case "Initialize":
@@ -845,6 +849,7 @@ func (opts *proxyOptions) run(args []string, stdout io.Writer) error {

    rb, terminate, err := handler.processRequest(readbuf)
    if terminate {
        logrus.Debug("terminating")
        return nil
    }
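These hunks only add debug logging around the experimental image proxy's request dispatch and reply serialization. For context, a rough sketch of the JSON envelope that logging refers to, with struct and field names inferred from the logged values rather than taken from the proxy's actual definitions:

```go
package main

import "encoding/json"

// Inferred request/reply shapes for illustration only; the real definitions
// live in the proxy implementation and may use different names and tags.
type proxyRequest struct {
    Method string `json:"method"`
    Args   []any  `json:"args"`
}

type proxyReply struct {
    Success bool   `json:"success"`
    Value   any    `json:"value"`
    PipeID  uint32 `json:"pipeid"`
    Error   string `json:"error,omitempty"`
}

// encodeReply mirrors what send() does conceptually: mark success based on err,
// attach the value and pipe id, and serialize the reply for the Unix socket.
func encodeReply(value any, pipeid uint32, err error) ([]byte, error) {
    r := proxyReply{Success: err == nil, Value: value, PipeID: pipeid}
    if err != nil {
        r.Error = err.Error()
    }
    return json.Marshal(r)
}
```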
@@ -20,6 +20,7 @@ import (
    "github.com/containers/image/v5/directory"
    "github.com/containers/image/v5/docker"
    "github.com/containers/image/v5/docker/reference"
    "github.com/containers/image/v5/manifest"
    "github.com/containers/image/v5/pkg/cli"
    "github.com/containers/image/v5/pkg/cli/sigstore"
    "github.com/containers/image/v5/signature/signer"
@@ -46,6 +47,7 @@ type syncOptions struct {
    format commonFlag.OptionalString // Force conversion of the image to a specified format
    source string // Source repository name
    destination string // Destination registry name
    digestFile string // Write digest to this file
    scoped bool // When true, namespace copied images at destination using the source repository name
    all bool // Copy all of the images if an image in the source is a list
    dryRun bool // Don't actually copy anything, just output what it would have done
@@ -121,6 +123,7 @@ See skopeo-sync(1) for details.
    flags.StringVarP(&opts.destination, "dest", "d", "", "DESTINATION transport type")
    flags.BoolVar(&opts.scoped, "scoped", false, "Images at DESTINATION are prefix using the full source image path as scope")
    flags.StringVar(&opts.appendSuffix, "append-suffix", "", "String to append to DESTINATION tags")
    flags.StringVar(&opts.digestFile, "digestfile", "", "Write the digests and Image References of the resulting images to the specified file, separated by newlines")
    flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
    flags.BoolVar(&opts.dryRun, "dry-run", false, "Run without actually copying data")
    flags.BoolVar(&opts.preserveDigests, "preserve-digests", false, "Preserve digests of images and lists")
@@ -723,10 +726,24 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
        logrus.Warn("Running in dry-run mode")
    }

    var digestFile *os.File
    if opts.digestFile != "" && !opts.dryRun {
        digestFile, err = os.OpenFile(opts.digestFile, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
        if err != nil {
            return fmt.Errorf("Error creating digest file: %w", err)
        }
        defer func() {
            if err := digestFile.Close(); err != nil {
                retErr = noteCloseFailure(retErr, "closing digest file", err)
            }
        }()
    }

    for _, srcRepo := range srcRepoList {
        options.SourceCtx = srcRepo.Context
        for counter, ref := range srcRepo.ImageRefs {
            var destSuffix string
            var manifestBytes []byte
            switch ref.Transport() {
            case docker.Transport:
                // docker -> dir or docker -> docker
@@ -758,7 +775,7 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
            } else {
                logrus.WithFields(fromToFields).Infof("Copying image ref %d/%d", counter+1, len(srcRepo.ImageRefs))
                if err = retry.IfNecessary(ctx, func() error {
                    _, err = copy.Image(ctx, policyContext, destRef, ref, &options)
                    manifestBytes, err = copy.Image(ctx, policyContext, destRef, ref, &options)
                    return err
                }, opts.retryOpts); err != nil {
                    if !opts.keepGoing {
@@ -770,7 +787,19 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
                    logrus.WithError(err).Errorf("Error copying ref %q", transports.ImageName(ref))
                    continue
                }
                // Ensure that we log the manifest digest to a file only if the copy operation was successful
                if opts.digestFile != "" {
                    manifestDigest, err := manifest.Digest(manifestBytes)
                    if err != nil {
                        return err
                    }
                    outputStr := fmt.Sprintf("%s %s", manifestDigest.String(), transports.ImageName(destRef))
                    if _, err = digestFile.WriteString(outputStr + "\n"); err != nil {
                        return fmt.Errorf("Failed to write digest to file %q: %w", opts.digestFile, err)
                    }
                }
            }

            imagesNumber++
        }
    }
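The digest-file handling added above boils down to: take the manifest bytes returned by copy.Image, compute their digest, and append one "digest image-reference" line per successfully copied image. A condensed sketch of that flow, using the same containers/image helpers the diff uses (error handling trimmed to the essentials):

```go
package main

import (
    "fmt"
    "os"

    "github.com/containers/image/v5/manifest"
    "github.com/containers/image/v5/transports"
    "github.com/containers/image/v5/types"
)

// appendDigestLine writes "<manifest digest> <destination image name>" to the digest
// file, matching the format documented for skopeo sync --digestfile.
func appendDigestLine(digestFile *os.File, manifestBytes []byte, destRef types.ImageReference) error {
    manifestDigest, err := manifest.Digest(manifestBytes) // digest of the copied manifest
    if err != nil {
        return err
    }
    _, err = fmt.Fprintf(digestFile, "%s %s\n", manifestDigest.String(), transports.ImageName(destRef))
    return err
}
```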
@@ -44,3 +44,18 @@ func TestTLSVerifyConfig(t *testing.T) {
    err := yaml.Unmarshal([]byte(`tls-verify: "not a valid bool"`), &config)
    assert.Error(t, err)
}

func TestSync(t *testing.T) {
    // Invalid command-line arguments
    for _, args := range [][]string{
        {},
        {"a1"},
        {"a1", "a2", "a3"},
    } {
        out, err := runSkopeo(append([]string{"sync"}, args...)...)
        assertTestFailed(t, out, err, "Exactly two arguments expected")
    }

    // FIXME: Much more test coverage
    // Actual feature tests exist in integration and systemtest
}
@@ -6,7 +6,7 @@ import (

    "github.com/containers/image/v5/transports/alltransports"
    "github.com/containers/storage/pkg/unshare"
    "github.com/syndtr/gocapability/capability"
    "github.com/moby/sys/capability"
)

var neededCapabilities = []capability.Cap{
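This hunk only swaps the archived github.com/syndtr/gocapability dependency for its maintained fork github.com/moby/sys/capability; neededCapabilities and the surrounding logic are unchanged. A rough sketch of how such a capability check is typically written against that API, assuming the fork preserves gocapability's NewPid2/Load/Get surface (illustrative, not the file's full logic):

```go
package main

import (
    "fmt"

    "github.com/moby/sys/capability"
)

// Assumed placeholder list; the real slice lives in cmd/skopeo.
var neededCaps = []capability.Cap{
    capability.CAP_CHOWN,
    capability.CAP_DAC_OVERRIDE,
}

// haveNeededCapabilities reports whether the current process holds every
// required capability in its effective set.
func haveNeededCapabilities() (bool, error) {
    caps, err := capability.NewPid2(0) // 0 means "this process"
    if err != nil {
        return false, err
    }
    if err := caps.Load(); err != nil {
        return false, fmt.Errorf("loading capabilities: %w", err)
    }
    for _, c := range neededCaps {
        if !caps.Get(capability.EFFECTIVE, c) {
            return false, nil
        }
    }
    return true, nil
}
```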
@@ -7,6 +7,7 @@ import (
    "io"
    "os"
    "strings"
    "time"

    commonFlag "github.com/containers/common/pkg/flag"
    "github.com/containers/common/pkg/retry"
@@ -26,7 +27,7 @@ import (
    "golang.org/x/term"
)

// errorShouldDisplayUsage is a subtype of error used by command handlers to indicate that cli.ShowSubcommandHelp should be called.
// errorShouldDisplayUsage is a subtype of error used by command handlers to indicate that the command’s help should be included.
type errorShouldDisplayUsage struct {
    error
}
@@ -62,7 +63,8 @@ func commandAction(handler func(args []string, stdout io.Writer) error) func(cmd
    err := handler(args, c.OutOrStdout())
    var shouldDisplayUsage errorShouldDisplayUsage
    if errors.As(err, &shouldDisplayUsage) {
        return c.Help()
        c.SetOut(c.ErrOrStderr()) // This mutates c, but we are failing anyway.
        _ = c.Help() // Even if this failed, we prefer to report the original error
    }
    return err
}
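The commandAction change above routes help output to stderr before printing it, so a usage error no longer pollutes stdout and the original error is still returned. A compact sketch of the whole pattern as the diff shows it, with a hypothetical handler added to illustrate how errorShouldDisplayUsage is raised:

```go
package main

import (
    "errors"
    "io"

    "github.com/spf13/cobra"
)

// errorShouldDisplayUsage wraps an error to signal that the command's help text
// should be printed in addition to reporting the error itself.
type errorShouldDisplayUsage struct {
    error
}

// commandAction adapts a handler to cobra's RunE signature.
func commandAction(handler func(args []string, stdout io.Writer) error) func(cmd *cobra.Command, args []string) error {
    return func(c *cobra.Command, args []string) error {
        err := handler(args, c.OutOrStdout())
        var shouldDisplayUsage errorShouldDisplayUsage
        if errors.As(err, &shouldDisplayUsage) {
            c.SetOut(c.ErrOrStderr()) // help goes to stderr, since we are failing anyway
            _ = c.Help()              // even if Help fails, prefer reporting the original error
        }
        return err
    }
}

// Hypothetical handler: reject anything but exactly two positional arguments.
func copyHandlerSketch(args []string, stdout io.Writer) error {
    if len(args) != 2 {
        return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
    }
    return nil
}
```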
@@ -183,6 +185,7 @@ func retryFlags() (pflag.FlagSet, *retry.Options) {
    opts := retry.Options{}
    fs := pflag.FlagSet{}
    fs.IntVar(&opts.MaxRetry, "retry-times", 0, "the number of times to possibly retry")
    fs.DurationVar(&opts.Delay, "retry-delay", 0*time.Second, "Fixed delay between retries. If not set, retry uses an exponential backoff delay.")
    return fs, &opts
}
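retryFlags wires --retry-times and the new --retry-delay into a containers/common retry.Options value; callers then wrap network operations in retry.IfNecessary, exactly as the sync changes earlier in this compare do. A small usage sketch of that pairing (the operation is hypothetical; the retry API is the one the diff itself uses):

```go
package main

import (
    "context"
    "time"

    "github.com/containers/common/pkg/retry"
)

// pullWithRetry retries a flaky operation according to the options that the
// --retry-times / --retry-delay flags populate.
func pullWithRetry(ctx context.Context, doPull func() error) error {
    opts := &retry.Options{
        MaxRetry: 3,               // --retry-times
        Delay:    2 * time.Second, // --retry-delay; zero keeps the exponential backoff
    }
    return retry.IfNecessary(ctx, func() error {
        return doPull() // hypothetical operation standing in for copy.Image and friends
    }, opts)
}
```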
@@ -8,7 +8,7 @@ ARG CIRRUS_IMAGE_VERSION
ENV CIRRUS_IMAGE_VERSION=$CIRRUS_IMAGE_VERSION
ADD https://sh.rustup.rs /var/tmp/rustup_installer.sh

RUN dnf erase -y rust && \
RUN dnf remove -y rust && \
    chmod +x /var/tmp/rustup_installer.sh && \
    /var/tmp/rustup_installer.sh -y --default-toolchain stable --profile minimal
@@ -53,7 +53,7 @@ _run_setup() {
    fi

    # VM's come with the distro. skopeo package pre-installed
    dnf erase -y skopeo
    dnf remove -y skopeo

    msg "Removing systemd-resolved from nsswitch.conf"
    # /etc/resolv.conf is already set to bypass systemd-resolvd
@@ -183,6 +183,8 @@ Existing signatures, if any, are preserved as well.
**--dest-compress-format** _format_

Specifies the compression format to use. Supported values are: `gzip`, `zstd` and `zstd:chunked`.
`zstd:chunked` is incompatible with encrypting images,
and will be treated as `zstd` with a warning in that case.

**--dest-compress-level** _format_

@@ -202,7 +204,11 @@ Precompute digests to ensure layers are not uploaded that already exist on the d
**--retry-times**

The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.
The number of times to retry.

**--retry-delay**

Fixed delay between retries. If not set (or set to 0s), retry wait time will be exponentially increased based on the number of failed attempts.

**--src-username**
@@ -66,7 +66,11 @@ Bearer token for accessing the registry.

**--retry-times**

The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.
The number of times to retry.

**--retry-delay**

Fixed delay between retries. If not set (or set to 0s), retry wait time will be exponentially increased based on the number of failed attempts.

**--shared-blob-dir** _directory_
@@ -13,7 +13,7 @@ The private key is encrypted with a passphrase;
if one is not provided using an option, this command prompts for it interactively.

The private key is written to _prefix_**.private** .
The private key is written to _prefix_**.pub** .
The public key is written to _prefix_**.pub** .

## OPTIONS
@@ -65,7 +65,11 @@ Registry token for accessing the registry.

**--retry-times**

The number of times to retry; retry wait time will be exponentially increased based on the number of failed attempts.
The number of times to retry.

**--retry-delay**

Fixed delay between retries. If not set (or set to 0s), retry wait time will be exponentially increased based on the number of failed attempts.

**--shared-blob-dir** _directory_
@@ -39,7 +39,11 @@ Bearer token for accessing the registry.

**--retry-times**

The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.
The number of times to retry.

**--retry-delay**

Fixed delay between retries. If not set (or set to 0s), retry wait time will be exponentially increased based on the number of failed attempts.

**--tls-verify**=_bool_
@@ -68,6 +68,16 @@ Print usage statement.

**--append-suffix** _tag-suffix_ String to append to destination tags.

**--digestfile** _path_

After copying the images from source, write the digest of the resulting images along with Image Reference.

```
sha256:bf91f90823248017a4f920fb541727fa8368dc6cf377a7debbd271cf6a31c8a7 docker://myhost.com/alpine:edge
sha256:31603596830fc7e56753139f9c2c6bd3759e48a850659506ebfb885d1cf3aef5 docker://myhost.com/postgres:14.3
```

**--preserve-digests**

Preserve the digests during copying. Fail if the digest cannot be preserved.
@@ -113,7 +123,13 @@ The passphare to use when signing with `--sign-by` or `--sign-by-sigstore-privat

**--dest-registry-token** _Bearer token_ for accessing the destination registry.

**--retry-times** the number of times to retry, retry wait time will be exponentially increased based on the number of failed attempts.
**--retry-times**

The number of times to retry.

**--retry-delay**

Fixed delay between retries. If not set (or set to 0s), retry wait time will be exponentially increased based on the number of failed attempts.

**--keep-going**
If any errors occur during copying of images, those errors are logged and the process continues syncing rest of the images and finally fails at the end.
go.mod (106 lines changed)
@@ -1,56 +1,58 @@
module github.com/containers/skopeo

// Minimum required golang version
go 1.21.0
go 1.22.8

// Warning: Ensure the "go" and "toolchain" versions match exactly to prevent unwanted auto-updates

require (
    github.com/Masterminds/semver/v3 v3.2.1
    github.com/containers/common v0.60.0
    github.com/containers/image/v5 v5.32.0
    github.com/containers/ocicrypt v1.2.0
    github.com/containers/storage v1.55.0
    github.com/Masterminds/semver/v3 v3.3.1
    github.com/containers/common v0.62.0
    github.com/containers/image/v5 v5.34.0
    github.com/containers/ocicrypt v1.2.1
    github.com/containers/storage v1.57.1
    github.com/docker/distribution v2.8.3+incompatible
    github.com/moby/sys/capability v0.4.0
    github.com/opencontainers/go-digest v1.0.0
    github.com/opencontainers/image-spec v1.1.0
    github.com/opencontainers/image-tools v1.0.0-rc3
    github.com/sirupsen/logrus v1.9.3
    github.com/spf13/cobra v1.8.1
    github.com/spf13/pflag v1.0.5
    github.com/stretchr/testify v1.9.0
    github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
    golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8
    golang.org/x/term v0.22.0
    github.com/spf13/pflag v1.0.6
    github.com/stretchr/testify v1.10.0
    golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329
    golang.org/x/term v0.29.0
    gopkg.in/yaml.v3 v3.0.1
)

require (
    dario.cat/mergo v1.0.0 // indirect
    dario.cat/mergo v1.0.1 // indirect
    github.com/BurntSushi/toml v1.4.0 // indirect
    github.com/Microsoft/go-winio v0.6.2 // indirect
    github.com/Microsoft/hcsshim v0.12.5 // indirect
    github.com/Microsoft/hcsshim v0.12.9 // indirect
    github.com/VividCortex/ewma v1.2.0 // indirect
    github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
    github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
    github.com/containerd/cgroups/v3 v3.0.3 // indirect
    github.com/containerd/errdefs v0.1.0 // indirect
    github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
    github.com/containerd/errdefs v0.3.0 // indirect
    github.com/containerd/errdefs/pkg v0.3.0 // indirect
    github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
    github.com/containerd/typeurl/v2 v2.2.3 // indirect
    github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
    github.com/coreos/go-oidc/v3 v3.10.0 // indirect
    github.com/coreos/go-oidc/v3 v3.12.0 // indirect
    github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
    github.com/cyphar/filepath-securejoin v0.3.1 // indirect
    github.com/cyphar/filepath-securejoin v0.3.6 // indirect
    github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
    github.com/distribution/reference v0.6.0 // indirect
    github.com/docker/docker v27.1.1+incompatible // indirect
    github.com/docker/docker v27.5.1+incompatible // indirect
    github.com/docker/docker-credential-helpers v0.8.2 // indirect
    github.com/docker/go-connections v0.5.0 // indirect
    github.com/docker/go-units v0.5.0 // indirect
    github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
    github.com/felixge/httpsnoop v1.0.4 // indirect
    github.com/go-jose/go-jose/v3 v3.0.3 // indirect
    github.com/go-jose/go-jose/v4 v4.0.2 // indirect
    github.com/go-logr/logr v1.4.1 // indirect
    github.com/go-jose/go-jose/v3 v3.0.4 // indirect
    github.com/go-jose/go-jose/v4 v4.0.5 // indirect
    github.com/go-logr/logr v1.4.2 // indirect
    github.com/go-logr/stdr v1.2.2 // indirect
    github.com/go-openapi/analysis v0.23.0 // indirect
    github.com/go-openapi/errors v0.22.0 // indirect
@@ -65,7 +67,7 @@ require (
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
    github.com/golang/protobuf v1.5.4 // indirect
    github.com/google/go-containerregistry v0.20.0 // indirect
    github.com/google/go-containerregistry v0.20.2 // indirect
    github.com/google/go-intervals v0.0.2 // indirect
    github.com/google/uuid v1.6.0 // indirect
    github.com/gorilla/mux v1.8.1 // indirect
@@ -76,62 +78,62 @@ require (
    github.com/inconshreveable/mousetrap v1.1.0 // indirect
    github.com/josharian/intern v1.0.0 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
    github.com/klauspost/compress v1.17.9 // indirect
    github.com/klauspost/compress v1.17.11 // indirect
    github.com/klauspost/pgzip v1.2.6 // indirect
    github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 // indirect
    github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect
    github.com/mailru/easyjson v0.7.7 // indirect
    github.com/mattn/go-runewidth v0.0.15 // indirect
    github.com/mattn/go-sqlite3 v1.14.22 // indirect
    github.com/mattn/go-runewidth v0.0.16 // indirect
    github.com/mattn/go-sqlite3 v1.14.24 // indirect
    github.com/miekg/pkcs11 v1.1.1 // indirect
    github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
    github.com/mitchellh/mapstructure v1.5.0 // indirect
    github.com/moby/docker-image-spec v1.3.1 // indirect
    github.com/moby/sys/mountinfo v0.7.2 // indirect
    github.com/moby/sys/user v0.2.0 // indirect
    github.com/moby/sys/user v0.3.0 // indirect
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
    github.com/modern-go/reflect2 v1.0.2 // indirect
    github.com/oklog/ulid v1.3.1 // indirect
    github.com/opencontainers/runtime-spec v1.2.0 // indirect
    github.com/opencontainers/selinux v1.11.0 // indirect
    github.com/opencontainers/selinux v1.11.1 // indirect
    github.com/opentracing/opentracing-go v1.2.0 // indirect
    github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
    github.com/pkg/errors v0.9.1 // indirect
    github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
    github.com/proglottis/gpgme v0.1.3 // indirect
    github.com/proglottis/gpgme v0.1.4 // indirect
    github.com/rivo/uniseg v0.4.7 // indirect
    github.com/russross/blackfriday v2.0.0+incompatible // indirect
    github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
    github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect
    github.com/segmentio/ksuid v1.0.4 // indirect
    github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
    github.com/sigstore/fulcio v1.4.5 // indirect
    github.com/sigstore/rekor v1.3.6 // indirect
    github.com/sigstore/sigstore v1.8.4 // indirect
    github.com/sigstore/fulcio v1.6.4 // indirect
    github.com/sigstore/rekor v1.3.8 // indirect
    github.com/sigstore/sigstore v1.8.12 // indirect
    github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
    github.com/smallstep/pkcs7 v0.1.1 // indirect
    github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
    github.com/sylabs/sif/v2 v2.18.0 // indirect
    github.com/tchap/go-patricia/v2 v2.3.1 // indirect
    github.com/sylabs/sif/v2 v2.20.2 // indirect
    github.com/tchap/go-patricia/v2 v2.3.2 // indirect
    github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
    github.com/ulikunitz/xz v0.5.12 // indirect
    github.com/vbatts/tar-split v0.11.5 // indirect
    github.com/vbauerster/mpb/v8 v8.7.4 // indirect
    github.com/vbatts/tar-split v0.11.7 // indirect
    github.com/vbauerster/mpb/v8 v8.9.1 // indirect
    github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
    github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
    github.com/xeipuuv/gojsonschema v1.2.0 // indirect
    go.mongodb.org/mongo-driver v1.14.0 // indirect
    go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
    go.opencensus.io v0.24.0 // indirect
    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
    go.opentelemetry.io/otel v1.24.0 // indirect
    go.opentelemetry.io/otel/metric v1.24.0 // indirect
    go.opentelemetry.io/otel/trace v1.24.0 // indirect
    golang.org/x/crypto v0.25.0 // indirect
    golang.org/x/mod v0.18.0 // indirect
    golang.org/x/net v0.26.0 // indirect
    golang.org/x/oauth2 v0.21.0 // indirect
    golang.org/x/sync v0.7.0 // indirect
    golang.org/x/sys v0.22.0 // indirect
    golang.org/x/text v0.16.0 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
    google.golang.org/grpc v1.64.1 // indirect
    google.golang.org/protobuf v1.33.0 // indirect
    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
    go.opentelemetry.io/otel v1.31.0 // indirect
    go.opentelemetry.io/otel/metric v1.31.0 // indirect
    go.opentelemetry.io/otel/trace v1.31.0 // indirect
    golang.org/x/crypto v0.32.0 // indirect
    golang.org/x/mod v0.22.0 // indirect
    golang.org/x/net v0.34.0 // indirect
    golang.org/x/oauth2 v0.25.0 // indirect
    golang.org/x/sync v0.10.0 // indirect
    golang.org/x/sys v0.30.0 // indirect
    golang.org/x/text v0.21.0 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect
    google.golang.org/grpc v1.69.4 // indirect
    google.golang.org/protobuf v1.36.2 // indirect
)
go.sum (326 lines changed)

Checksum entries updated to match the dependency versions bumped in go.mod above; the captured page ends partway through this file's diff.
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
|
||||
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
@@ -194,18 +198,18 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 h1:aiPrFdHDCCvigNBCkOWj2lv9Bx5xDp210OANZEoiP0I=
|
||||
github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0/go.mod h1:srVwm2N3DC/tWqQ+igZXDrmKlNRN8X/dmJ1wEZrv760=
|
||||
github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ=
|
||||
github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
|
||||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
|
||||
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
|
||||
github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
|
||||
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
|
||||
github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU=
|
||||
@@ -214,10 +218,12 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/sys/capability v0.4.0 h1:4D4mI6KlNtWMCM1Z/K0i7RV1FkX+DBDHKVJpCndZoHk=
|
||||
github.com/moby/sys/capability v0.4.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I=
|
||||
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
|
||||
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
|
||||
github.com/moby/sys/user v0.2.0 h1:OnpapJsRp25vkhw8TFG6OLJODNh/3rEwRWtJ3kakwRM=
|
||||
github.com/moby/sys/user v0.2.0/go.mod h1:RYstrcWOJpVh+6qzUqp2bU3eaRpdiQeKGlKitaH0PM8=
|
||||
github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
|
||||
github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
@@ -227,12 +233,14 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
|
||||
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
|
||||
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
|
||||
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
|
||||
github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
|
||||
github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
|
||||
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
|
||||
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
||||
@@ -241,8 +249,8 @@ github.com/opencontainers/image-tools v1.0.0-rc3 h1:ZR837lBIxq6mmwEqfYrbLMuf75eB
|
||||
github.com/opencontainers/image-tools v1.0.0-rc3/go.mod h1:A9btVpZLzttF4iFaKNychhPyrhfOjJ1OF5KrA8GcLj4=
|
||||
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
|
||||
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
|
||||
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
|
||||
github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8=
|
||||
github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
|
||||
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
|
||||
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
|
||||
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
|
||||
@@ -252,49 +260,52 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0=
|
||||
github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
|
||||
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
|
||||
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
|
||||
github.com/proglottis/gpgme v0.1.4 h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M=
|
||||
github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM=
|
||||
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
|
||||
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
|
||||
github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
|
||||
github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4DsQCw=
|
||||
github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY=
|
||||
github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/russross/blackfriday v2.0.0+incompatible h1:cBXrhZNUf9C+La9/YpS+UHpUT8YD6Td9ZMSU9APFcsk=
|
||||
github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
|
||||
github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU=
|
||||
github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEWY=
|
||||
github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw=
|
||||
github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
|
||||
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
|
||||
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
|
||||
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sigstore/fulcio v1.4.5 h1:WWNnrOknD0DbruuZWCbN+86WRROpEl3Xts+WT2Ek1yc=
|
||||
github.com/sigstore/fulcio v1.4.5/go.mod h1:oz3Qwlma8dWcSS/IENR/6SjbW4ipN0cxpRVfgdsjMU8=
|
||||
github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8=
|
||||
github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc=
|
||||
github.com/sigstore/sigstore v1.8.4 h1:g4ICNpiENFnWxjmBzBDWUn62rNFeny/P77HUC8da32w=
|
||||
github.com/sigstore/sigstore v1.8.4/go.mod h1:1jIKtkTFEeISen7en+ZPWdDHazqhxco/+v9CNjc7oNg=
|
||||
github.com/sigstore/fulcio v1.6.4 h1:d86obfxUAG3Y6CYwOx1pdwCZwKmROB6w6927pKOVIRY=
|
||||
github.com/sigstore/fulcio v1.6.4/go.mod h1:Y6bn3i3KGhXpaHsAtYP3Z4Np0+VzCo1fLv8Ci6mbPDs=
|
||||
github.com/sigstore/rekor v1.3.8 h1:B8kJI8mpSIXova4Jxa6vXdJyysRxFGsEsLKBDl0rRjA=
|
||||
github.com/sigstore/rekor v1.3.8/go.mod h1:/dHFYKSuxEygfDRnEwyJ+ZD6qoVYNXQdi1mJrKvKWsI=
|
||||
github.com/sigstore/sigstore v1.8.12 h1:S8xMVZbE2z9ZBuQUEG737pxdLjnbOIcFi5v9UFfkJFc=
|
||||
github.com/sigstore/sigstore v1.8.12/go.mod h1:+PYQAa8rfw0QdPpBcT+Gl3egKD9c+TUgAlF12H3Nmjo=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
|
||||
github.com/smallstep/pkcs7 v0.1.1 h1:x+rPdt2W088V9Vkjho4KtoggyktZJlMduZAtRHm68LU=
|
||||
github.com/smallstep/pkcs7 v0.1.1/go.mod h1:dL6j5AIz9GHjVEBTXtW+QliALcgM19RtXaTeyxI+AfA=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw=
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
@@ -305,23 +316,21 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/sylabs/sif/v2 v2.18.0 h1:eXugsS1qx7St2Wu/AJ21KnsQiVCpouPlTigABh+6KYI=
|
||||
github.com/sylabs/sif/v2 v2.18.0/go.mod h1:GOQj7LIBqp15fjqH5i8ZEbLp8SXJi9S+xbRO+QQAdRo=
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
|
||||
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/sylabs/sif/v2 v2.20.2 h1:HGEPzauCHhIosw5o6xmT3jczuKEuaFzSfdjAsH33vYw=
|
||||
github.com/sylabs/sif/v2 v2.20.2/go.mod h1:WyYryGRaR4Wp21SAymm5pK0p45qzZCSRiZMFvUZiuhc=
|
||||
github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM=
|
||||
github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
|
||||
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
|
||||
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
|
||||
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
||||
github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
|
||||
github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
||||
github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
|
||||
github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
|
||||
github.com/vbauerster/mpb/v8 v8.7.4 h1:p4f16iMfUt3PkAC73SCzAtgtSf8TYDqEbJUT3odPrPo=
|
||||
github.com/vbauerster/mpb/v8 v8.7.4/go.mod h1:r1B5k2Ljj5KJFCekfihbiqyV4VaaRTANYmvWA2btufI=
|
||||
github.com/vbatts/tar-split v0.11.7 h1:ixZ93pO/GmvaZw4Vq9OwmfZK/kc2zKdPfu0B+gYqs3U=
|
||||
github.com/vbatts/tar-split v0.11.7/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
|
||||
github.com/vbauerster/mpb/v8 v8.9.1 h1:LH5R3lXPfE2e3lIGxN7WNWv3Hl5nWO6LRi2B0L0ERHw=
|
||||
github.com/vbauerster/mpb/v8 v8.9.1/go.mod h1:4XMvznPh8nfe2NpnDo1QTPvW9MVkUhbG90mPWvmOzcQ=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
@@ -333,49 +342,52 @@ github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ=
|
||||
github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns=
|
||||
github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ=
|
||||
github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18=
|
||||
github.com/ysmood/got v0.34.1 h1:IrV2uWLs45VXNvZqhJ6g2nIhY+pgIG1CUoOcqfXFl1s=
|
||||
github.com/ysmood/got v0.34.1/go.mod h1:yddyjq/PmAf08RMLSwDjPyCvHvYed+WjHnQxpH851LM=
|
||||
github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q=
|
||||
github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg=
|
||||
github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=
|
||||
github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
|
||||
github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak=
|
||||
github.com/ysmood/leakless v0.8.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
|
||||
github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU=
|
||||
github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
|
||||
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
|
||||
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak=
|
||||
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
|
||||
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
|
||||
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
|
||||
go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
|
||||
go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
|
||||
go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
|
||||
go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
|
||||
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
|
||||
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
|
||||
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
|
||||
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
|
||||
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
|
||||
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
|
||||
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
|
||||
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
|
||||
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
|
||||
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
|
||||
go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
|
||||
go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
|
||||
golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
|
||||
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY=
|
||||
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
|
||||
golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 h1:9kj3STMvgqy3YA4VQXBrN7925ICMxD5wzMRcgA30588=
|
||||
golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
@@ -383,8 +395,11 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
|
||||
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
|
||||
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -398,11 +413,14 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
|
||||
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
|
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
|
||||
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
|
||||
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
|
||||
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -410,8 +428,11 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -423,26 +444,35 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
|
||||
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
|
||||
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
|
||||
golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
|
||||
golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
|
||||
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
@@ -453,8 +483,10 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
|
||||
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
|
||||
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -464,18 +496,18 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
|
||||
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA=
|
||||
google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0=
|
||||
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
|
||||
google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@@ -485,8 +517,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU=
|
||||
google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
|
||||
@@ -29,6 +29,6 @@ $CONTAINER_RUNTIME run --rm \
--entrypoint=/usr/share/automation/bin/cirrus-ci_env.py \
quay.io/libpod/get_ci_vm:latest \
--envs="Skopeo Test" /src/.cirrus.yml | \
egrep -m1 '^SKOPEO_CIDEV_CONTAINER_FQIN' | \
grep -E -m1 '^SKOPEO_CIDEV_CONTAINER_FQIN' | \
awk -F "=" -e '{print $2}' | \
tr -d \'\"

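These `egrep` to `grep -E` swaps track GNU grep's deprecation of the `egrep` wrapper; both forms match the same extended regular expressions. A minimal sketch of the extraction pipeline above, with a made-up FQIN standing in for the real cirrus-ci_env.py output:

```bash
# Placeholder input; the real value comes from cirrus-ci_env.py.
echo 'SKOPEO_CIDEV_CONTAINER_FQIN="quay.io/example/skopeo_cidev:c123"' |
    grep -E -m1 '^SKOPEO_CIDEV_CONTAINER_FQIN' |
    awk -F "=" '{print $2}' |
    tr -d '"'
# Prints: quay.io/example/skopeo_cidev:c123
```
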
@@ -29,7 +29,7 @@ rc=0
# for a given skopeo-foo.1.md, the NAME should be 'skopeo-foo'
for md in *.1.md;do
# Read the first line after '## NAME'
name=$(egrep -A1 '^## NAME' $md|tail -1|awk '{print $1}' | tr -d \\\\)
name=$(grep -E -A1 '^## NAME' $md|tail -1|awk '{print $1}' | tr -d \\\\)

expect=$(basename $md .1.md)
if [ "$name" != "$expect" ]; then
@@ -45,7 +45,7 @@ done
# Make sure the descriptive text in skopeo-foo.1.md matches the one
# in the table in skopeo.1.md.
for md in $(ls -1 *-*.1.md);do
desc=$(egrep -A1 '^## NAME' $md|tail -1|sed -E -e 's/^skopeo[^[:space:]]+ - //')
desc=$(grep -E -A1 '^## NAME' $md|tail -1|sed -E -e 's/^skopeo[^[:space:]]+ - //')

# Find the descriptive text in the main skopeo man page.
parent=skopeo.1.md
@@ -112,7 +112,7 @@ function compare_usage() {
#
# Make sure the SYNOPSIS line in skopeo-foo.1.md reads '**skopeo foo** ...'
for md in *.1.md;do
synopsis=$(egrep -A1 '^#* SYNOPSIS' $md|tail -1)
synopsis=$(grep -E -A1 '^#* SYNOPSIS' $md|tail -1)

# Command name must be bracketed by double asterisks; options and
# arguments are bracketed by single ones.

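To see the NAME check in isolation, here is a self-contained run against a throwaway page; the file name and its contents are invented for illustration:

```bash
# A man-page source whose NAME entry matches its file name.
cat > /tmp/skopeo-copy.1.md <<'EOF'
## NAME
skopeo\-copy - Copy an image from one location to another.
EOF

# Same pipeline as the script: the line after '## NAME', first word, backslashes removed.
name=$(grep -E -A1 '^## NAME' /tmp/skopeo-copy.1.md | tail -1 | awk '{print $1}' | tr -d \\)
echo "$name"   # -> skopeo-copy, matching $(basename /tmp/skopeo-copy.1.md .1.md)
```
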
@@ -1,7 +1,10 @@
#!/usr/bin/env bash
set -e

STATUS=$(git status --porcelain)
# TMT breaks this so we only check go.* and vendor
# https://github.com/teemtee/tmt/issues/3800
# STATUS=$(git status --porcelain)
STATUS=$(git status --porcelain go.* vendor)
if [[ -z $STATUS ]]
then
echo "tree is clean"

@@ -139,7 +139,7 @@ located at [https://github.com/containers/image_build/tree/main/skopeo](https://

Otherwise, read on for building and installing it from source:

To build the `skopeo` binary you need at least Go 1.21.
To build the `skopeo` binary you need at least Go 1.22.

There are two ways to build skopeo: in a container, or locally without a
container. Choose the one which better matches your needs and environment.

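Since the source-build path now requires Go 1.22, a minimal local-build sketch may help; the package names below are Debian/Ubuntu-flavoured assumptions, and only `make bin/skopeo` is taken from the project's Makefile:

```bash
# Build dependencies (names vary by distribution; this list is an assumption).
sudo apt-get install -y golang-go go-md2man libgpgme-dev libassuan-dev libbtrfs-dev pkg-config

git clone https://github.com/containers/skopeo
cd skopeo
make bin/skopeo        # needs Go >= 1.22, as noted above
./bin/skopeo --version
```
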
@@ -30,7 +30,7 @@ func setupRegistryV2At(t *testing.T, url string, auth, schema1 bool) *testRegist
require.NoError(t, err)

// Wait for registry to be ready to serve requests.
for i := 0; i != 50; i++ {
for range 50 {
if err = reg.Ping(); err == nil {
break
}

plans/no-rpm.fmf (Normal file, 23 lines)
@@ -0,0 +1,23 @@
prepare:
- name: dependencies
how: install
package: [ golang, go-md2man, gpgme-devel, podman-docker ]

discover:
keep-git-metadata: true
how: shell
tests:
- name: /validate
test: >
make tools &&
make BUILDTAGS="exclude_graphdriver_btrfs" validate-local &&
make validate-docs &&
make vendor &&
hack/tree_status.sh
- name: /integration
test: make BUILDTAGS="exclude_graphdriver_btrfs" test-integration
- name: /unit
test: make BUILDTAGS="exclude_graphdriver_btrfs" test-unit-local

execute:
how: tmt

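A rough sketch of exercising the new plan with tmt; the exact flags are assumptions about the tmt CLI rather than anything this change prescribes:

```bash
# List discovered plans, then run the no-rpm plan on the local machine
# instead of a provisioned guest.
tmt plans ls
tmt run --all provision --how local plan --name /plans/no-rpm
```
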
plans/system.fmf (Normal file, 13 lines)
@@ -0,0 +1,13 @@
discover:
how: fmf
filter: 'tag:system'
execute:
how: tmt
prepare:
- how: shell
script: |
BATS_VERSION=1.12.0
curl -L https://github.com/bats-core/bats-core/archive/refs/tags/v"$BATS_VERSION".tar.gz | tar -xz
cd bats-core-"$BATS_VERSION"
./install.sh /usr
order: 10

rpm/skopeo.spec (167 lines)
@@ -1,167 +0,0 @@
%global with_debug 1

%if 0%{?with_debug}
%global _find_debuginfo_dwz_opts %{nil}
%global _dwz_low_mem_die_limit 0
%else
%global debug_package %{nil}
%endif

# RHEL's default %%gobuild macro doesn't account for the BUILDTAGS variable, so we
# set it separately here and do not depend on RHEL's go-[s]rpm-macros package
# until that's fixed.
# c9s bz: https://bugzilla.redhat.com/show_bug.cgi?id=2227328
# c8s bz: https://bugzilla.redhat.com/show_bug.cgi?id=2227331
%if %{defined rhel}
%define gobuild(o:) go build -buildmode pie -compiler gc -tags="rpm_crashtraceback libtrust_openssl ${BUILDTAGS:-}" -ldflags "-linkmode=external -compressdwarf=false ${LDFLAGS:-} -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n') -extldflags '%__global_ldflags'" -a -v -x %{?**};
%endif

%global gomodulesmode GO111MODULE=on

# No btrfs on RHEL
%if %{defined fedora}
%define build_with_btrfs 1
%endif

# Only used in official koji builds
# Copr builds set a separate epoch for all environments
%if %{defined fedora}
%define conditional_epoch 1
%else
%define conditional_epoch 2
%endif

Name: skopeo
%if %{defined copr_username}
Epoch: 102
%else
Epoch: %{conditional_epoch}
%endif
# DO NOT TOUCH the Version string!
# The TRUE source of this specfile is:
# https://github.com/containers/skopeo/blob/main/rpm/skopeo.spec
# If that's what you're reading, Version must be 0, and will be updated by Packit for
# copr and koji builds.
# If you're reading this on dist-git, the version is automatically filled in by Packit.
Version: 0
# The `AND` needs to be uppercase in the License for SPDX compatibility
License: Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND ISC AND MIT AND MPL-2.0
Release: %autorelease
%if %{defined golang_arches_future}
ExclusiveArch: %{golang_arches_future}
%else
ExclusiveArch: aarch64 ppc64le s390x x86_64
%endif
Summary: Inspect container images and repositories on registries
URL: https://github.com/containers/%{name}
# Tarball fetched from upstream
Source0: %{url}/archive/v%{version}.tar.gz
BuildRequires: %{_bindir}/go-md2man
%if %{defined build_with_btrfs}
BuildRequires: btrfs-progs-devel
%endif
BuildRequires: git-core
BuildRequires: golang
%if !%{defined gobuild}
BuildRequires: go-rpm-macros
%endif
BuildRequires: gpgme-devel
BuildRequires: libassuan-devel
BuildRequires: ostree-devel
BuildRequires: glib2-devel
BuildRequires: make
BuildRequires: shadow-utils-subid-devel
Requires: containers-common >= 4:1-21

%description
Command line utility to inspect images and repositories directly on Docker
registries without the need to pull them

%package tests
Summary: Tests for %{name}

Requires: %{name} = %{epoch}:%{version}-%{release}
%if %{defined fedora}
Requires: bats
%endif
Requires: gnupg
Requires: jq
Requires: golang
Requires: podman
Requires: crun
Requires: httpd-tools
Requires: openssl
Requires: fakeroot
Requires: squashfs-tools

%description tests
%{summary}

This package contains system tests for %{name}

%prep
%autosetup -Sgit %{name}-%{version}
# The %%install stage should not rebuild anything but only install what's
# built in the %%build stage. So, remove any dependency on build targets.
sed -i 's/^install-binary: bin\/%{name}.*/install-binary:/' Makefile
sed -i 's/^completions: bin\/%{name}.*/completions:/' Makefile
sed -i 's/^install-docs: docs.*/install-docs:/' Makefile

%build
%set_build_flags
export CGO_CFLAGS=$CFLAGS

# These extra flags present in $CFLAGS have been skipped for now as they break the build
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-flto=auto//g')
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-Wp,D_GLIBCXX_ASSERTIONS//g')
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-specs=\/usr\/lib\/rpm\/redhat\/redhat-annobin-cc1//g')

%ifarch x86_64
export CGO_CFLAGS="$CGO_CFLAGS -m64 -mtune=generic -fcf-protection=full"
%endif

BASEBUILDTAGS="$(hack/libsubid_tag.sh)"
%if %{defined build_with_btrfs}
export BUILDTAGS="$BASEBUILDTAGS $(hack/btrfs_tag.sh) $(hack/btrfs_installed_tag.sh)"
%else
export BUILDTAGS="$BASEBUILDTAGS btrfs_noversion exclude_graphdriver_btrfs"
%endif

# unset LDFLAGS earlier set from set_build_flags
LDFLAGS=''

%gobuild -o bin/%{name} ./cmd/%{name}
%{__make} docs

%install
make \
DESTDIR=%{buildroot} \
PREFIX=%{_prefix} \
install-binary install-docs install-completions

# system tests
install -d -p %{buildroot}/%{_datadir}/%{name}/test/system
cp -pav systemtest/* %{buildroot}/%{_datadir}/%{name}/test/system/

#define license tag if not already defined
%{!?_licensedir:%global license %doc}

%files
%license LICENSE
%doc README.md
%{_bindir}/%{name}
%{_mandir}/man1/%{name}*
%dir %{_datadir}/bash-completion
%dir %{_datadir}/bash-completion/completions
%{_datadir}/bash-completion/completions/%{name}
%dir %{_datadir}/fish/vendor_completions.d
%{_datadir}/fish/vendor_completions.d/%{name}.fish
%dir %{_datadir}/zsh/site-functions
%{_datadir}/zsh/site-functions/_%{name}

%files tests
%license LICENSE vendor/modules.txt
%{_datadir}/%{name}/test

%changelog
%autochangelog

@@ -36,7 +36,7 @@ load helpers
# the output of 'inspect' lists layer digests,
# but not the digest of the config blob ($config_digest), if any.
layers=$(jq -r '.Layers' <<<"$inspect_local")
for sha in $(find $workdir -type f | xargs -l1 basename | egrep '^[0-9a-f]{64}$'); do
for sha in $(find $workdir -type f | xargs -l1 basename | grep -E '^[0-9a-f]{64}$'); do
if [ "sha256:$sha" != "$config_digest" ]; then
expect_output --from="$layers" --substring "sha256:$sha" \
"Locally-extracted SHA file is present in 'inspect'"
@@ -132,7 +132,7 @@ END_EXPECT
@test "inspect: image unknown" {
# non existing image
run_skopeo 2 inspect containers-storage:non-existing-tag
expect_output --substring "identifier is not an image" \
expect_output --substring "does not resolve to an image ID" \
"skopeo inspect containers-storage:010101010101"
}

@@ -10,7 +10,7 @@ SKOPEO_BINARY=${SKOPEO_BINARY:-${TEST_SOURCE_DIR}/../bin/skopeo}
SKOPEO_TIMEOUT=${SKOPEO_TIMEOUT:-300}

# Default image to run as a local registry
REGISTRY_FQIN=${SKOPEO_TEST_REGISTRY_FQIN:-quay.io/libpod/registry:2}
REGISTRY_FQIN=${SKOPEO_TEST_REGISTRY_FQIN:-quay.io/libpod/registry:2.8.2}

###############################################################################
# BEGIN setup/teardown
@@ -317,7 +317,7 @@ start_registry() {
die "start_registry() invoked with testuser but no testpassword"
fi

if ! egrep -q "^$testuser:" $AUTHDIR/htpasswd; then
if ! grep -E -q "^$testuser:" $AUTHDIR/htpasswd; then
htpasswd -Bbn $testuser $testpassword >> $AUTHDIR/htpasswd
fi

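The pinned registry image can still be overridden through the same environment variable; a sketch of running the BATS system tests from a source checkout (the paths and the bats invocation are assumptions based on the helpers shown here):

```bash
# Run the system tests against an explicit local-registry image and a local build.
SKOPEO_TEST_REGISTRY_FQIN=quay.io/libpod/registry:2.8.2 \
SKOPEO_BINARY=$(pwd)/bin/skopeo \
bats systemtest/
```
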
systemtest/tmt/main.fmf (Normal file, 10 lines)
@@ -0,0 +1,10 @@
require:
- skopeo-tests

environment:
SKOPEO_BINARY: /usr/bin/skopeo

summary: System test
test: bash ./test.sh
duration: 60m
tag: [ system ]

systemtest/tmt/test.sh (Normal file, 12 lines)
@@ -0,0 +1,12 @@
#!/usr/bin/env bash

set -exo pipefail

uname -r

rpm -q \
containers-common \
skopeo \
skopeo-tests \

bats /usr/share/skopeo/test/system

vendor/dario.cat/mergo/.gitignore (vendored, 3 lines)
@@ -13,6 +13,9 @@
# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Golang/Intellij
.idea

# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/

vendor/dario.cat/mergo/README.md (vendored, 102 changes)
@@ -44,13 +44,21 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the

## Status

-It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
+Mergo is stable and frozen, ready for production. Check a short list of the projects using at large scale it [here](https://github.com/imdario/mergo#mergo-in-the-wild).

+No new features are accepted. They will be considered for a future v2 that improves the implementation and fixes bugs for corner cases.

### Important notes

#### 1.0.0

-In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`.
+In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. No more v1 versions will be released.

+If the vanity URL is causing issues in your project due to a dependency pulling Mergo - it isn't a direct dependency in your project - it is recommended to use [replace](https://github.com/golang/go/wiki/Modules#when-should-i-use-the-replace-directive) to pin the version to the last one with the old import URL:

+```
+replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.16
+```

#### 0.3.9

@@ -64,55 +72,24 @@ If you were using Mergo before April 6th, 2015, please check your project works

If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:

<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
<a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a>
<a href='https://github.com/sponsors/imdario' target='_blank'><img alt="Become my sponsor" src="https://img.shields.io/github/sponsors/imdario?style=for-the-badge" /></a>

### Mergo in the wild

-- [moby/moby](https://github.com/moby/moby)
-- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
-- [vmware/dispatch](https://github.com/vmware/dispatch)
-- [Shopify/themekit](https://github.com/Shopify/themekit)
-- [imdario/zas](https://github.com/imdario/zas)
-- [matcornic/hermes](https://github.com/matcornic/hermes)
-- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
-- [kataras/iris](https://github.com/kataras/iris)
-- [michaelsauter/crane](https://github.com/michaelsauter/crane)
-- [go-task/task](https://github.com/go-task/task)
-- [sensu/uchiwa](https://github.com/sensu/uchiwa)
-- [ory/hydra](https://github.com/ory/hydra)
-- [sisatech/vcli](https://github.com/sisatech/vcli)
-- [dairycart/dairycart](https://github.com/dairycart/dairycart)
-- [projectcalico/felix](https://github.com/projectcalico/felix)
-- [resin-os/balena](https://github.com/resin-os/balena)
-- [go-kivik/kivik](https://github.com/go-kivik/kivik)
-- [Telefonica/govice](https://github.com/Telefonica/govice)
-- [supergiant/supergiant](supergiant/supergiant)
-- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
-- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
-- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
-- [EagerIO/Stout](https://github.com/EagerIO/Stout)
-- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
-- [russross/canvasassignments](https://github.com/russross/canvasassignments)
-- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
-- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
-- [divshot/gitling](https://github.com/divshot/gitling)
-- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
-- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
-- [elwinar/rambler](https://github.com/elwinar/rambler)
-- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
-- [jfbus/impressionist](https://github.com/jfbus/impressionist)
-- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
-- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
-- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
-- [thoas/picfit](https://github.com/thoas/picfit)
-- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
-- [jnuthong/item_search](https://github.com/jnuthong/item_search)
-- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
-- [containerssh/containerssh](https://github.com/containerssh/containerssh)
-- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
-- [tjpnz/structbot](https://github.com/tjpnz/structbot)
+Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/dependents) [of](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.16/dependents) [projects](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.12), including:

+* [containerd/containerd](https://github.com/containerd/containerd)
+* [datadog/datadog-agent](https://github.com/datadog/datadog-agent)
+* [docker/cli/](https://github.com/docker/cli/)
+* [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
+* [go-micro/go-micro](https://github.com/go-micro/go-micro)
+* [grafana/loki](https://github.com/grafana/loki)
+* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
+* [masterminds/sprig](github.com/Masterminds/sprig)
+* [moby/moby](https://github.com/moby/moby)
+* [slackhq/nebula](https://github.com/slackhq/nebula)
+* [volcano-sh/volcano](https://github.com/volcano-sh/volcano)

## Install

@@ -141,6 +118,39 @@ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
}
```

+If you need to override pointers, so the source pointer's value is assigned to the destination's pointer, you must use `WithoutDereference`:

+```go
+package main

+import (
+	"fmt"

+	"dario.cat/mergo"
+)

+type Foo struct {
+	A *string
+	B int64
+}

+func main() {
+	first := "first"
+	second := "second"
+	src := Foo{
+		A: &first,
+		B: 2,
+	}

+	dest := Foo{
+		A: &second,
+		B: 1,
+	}

+	mergo.Merge(&dest, src, mergo.WithOverride, mergo.WithoutDereference)
+}
+```

Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.

```go
vendor/dario.cat/mergo/map.go (vendored, 2 changes)
@@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
		}
		fieldName := field.Name
		fieldName = changeInitialCase(fieldName, unicode.ToLower)
-		if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) {
+		if _, ok := dstMap[fieldName]; !ok || (!isEmptyValue(reflect.ValueOf(src.Field(i).Interface()), !config.ShouldNotDereference) && overwrite) || config.overwriteWithEmptyValue {
			dstMap[fieldName] = src.Field(i).Interface()
		}
	}
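For orientation on the map.go change above: it tightens when `Map()` overwrites a destination key from a struct field. The sketch below is editorial (not part of the diff) and assumes mergo v1.x defaults; the `Config` type and values are purely illustrative.

```go
package main

import (
	"fmt"

	"dario.cat/mergo"
)

// Config is an illustrative destination struct; with default options only
// empty fields are filled, unless mergo.WithOverride is passed.
type Config struct {
	Name string
	Port int
}

func main() {
	dst := Config{Port: 8080}
	// Map keys are capitalized to match exported fields ("name" -> Name).
	src := map[string]interface{}{"name": "registry"}

	if err := mergo.Map(&dst, src); err != nil {
		fmt.Println("map failed:", err)
		return
	}
	fmt.Printf("%+v\n", dst) // expected: {Name:registry Port:8080}
}
```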
vendor/dario.cat/mergo/merge.go (vendored, 2 changes)
@@ -269,7 +269,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
			if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
				return
			}
-		} else {
+		} else if src.Elem().Kind() != reflect.Struct {
			if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
				dst.Set(src)
			}
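The merge.go hunk above narrows the pointer assignment path to non-struct elements. As a rough editorial illustration of the behaviour that path covers (a sketch assuming mergo v1.x defaults, with an invented `Opts` type), a nil destination pointer to a non-struct value is filled from the source:

```go
package main

import (
	"fmt"

	"dario.cat/mergo"
)

// Opts is an illustrative type with a pointer to a non-struct element (*string).
type Opts struct {
	Tag *string
}

func main() {
	v := "latest"
	src := Opts{Tag: &v}
	dst := Opts{} // Tag is nil

	// With default options, the empty (nil) destination pointer is set from src.
	if err := mergo.Merge(&dst, src); err != nil {
		fmt.Println("merge failed:", err)
		return
	}
	fmt.Println(*dst.Tag) // expected: latest
}
```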
vendor/github.com/Masterminds/semver/v3/CHANGELOG.md (generated, vendored, 28 changes)
@@ -1,5 +1,33 @@
# Changelog

+## 3.3.0 (2024-08-27)
+
+### Added
+
+- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser)
+- #213: nil version equality checking (thanks @KnutZuidema)
+
+### Changed
+
+- #241: Simplify StrictNewVersion parsing (thanks @grosser)
+- Testing support up through Go 1.23
+- Minimum version set to 1.21 as this is what's tested now
+- Fuzz testing now supports caching
+
+## 3.2.1 (2023-04-10)
+
+### Changed
+
+- #198: Improved testing around pre-release names
+- #200: Improved code scanning with addition of CodeQL
+- #201: Testing now includes Go 1.20. Go 1.17 has been dropped
+- #202: Migrated Fuzz testing to Go built-in Fuzzing. CI runs daily
+- #203: Docs updated for security details
+
+### Fixed
+
+- #199: Fixed issue with range transformations
+
## 3.2.0 (2022-11-28)

### Added
vendor/github.com/Masterminds/semver/v3/Makefile (generated, vendored, 3 changes)
@@ -19,6 +19,7 @@ test-cover:
.PHONY: fuzz
fuzz:
	@echo "==> Running Fuzz Tests"
+	go env GOCACHE
	go test -fuzz=FuzzNewVersion -fuzztime=15s .
	go test -fuzz=FuzzStrictNewVersion -fuzztime=15s .
	go test -fuzz=FuzzNewConstraint -fuzztime=15s .
@@ -27,4 +28,4 @@ $(GOLANGCI_LINT):
	# Install golangci-lint. The configuration for it is in the .golangci.yml
	# file in the root of the repository
	echo ${GOPATH}
-	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1
+	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2
vendor/github.com/Masterminds/semver/v3/README.md (generated, vendored, 28 changes)
@@ -13,12 +13,9 @@ Active](https://masterminds.github.io/stability/active.svg)](https://masterminds
[](https://pkg.go.dev/github.com/Masterminds/semver/v3)
[](https://goreportcard.com/report/github.com/Masterminds/semver)

If you are looking for a command line tool for version comparisons please see
[vert](https://github.com/Masterminds/vert) which uses this library.

## Package Versions

-Note, import `github.com/github.com/Masterminds/semver/v3` to use the latest version.
+Note, import `github.com/Masterminds/semver/v3` to use the latest version.

There are three major versions fo the `semver` package.

@@ -80,12 +77,12 @@ There are two methods for comparing versions. One uses comparison methods on
differences to notes between these two methods of comparison.

1. When two versions are compared using functions such as `Compare`, `LessThan`,
-   and others it will follow the specification and always include prereleases
+   and others it will follow the specification and always include pre-releases
   within the comparison. It will provide an answer that is valid with the
   comparison section of the spec at https://semver.org/#spec-item-11
2. When constraint checking is used for checks or validation it will follow a
   different set of rules that are common for ranges with tools like npm/js
-   and Rust/Cargo. This includes considering prereleases to be invalid if the
+   and Rust/Cargo. This includes considering pre-releases to be invalid if the
   ranges does not include one. If you want to have it include pre-releases a
   simple solution is to include `-0` in your range.
3. Constraint ranges can have some complex rules including the shorthand use of
@@ -113,7 +110,7 @@ v, err := semver.NewVersion("1.3")
if err != nil {
    // Handle version not being parsable.
}
-// Check if the version meets the constraints. The a variable will be true.
+// Check if the version meets the constraints. The variable a will be true.
a := c.Check(v)
```

@@ -137,20 +134,20 @@ The basic comparisons are:
### Working With Prerelease Versions

Pre-releases, for those not familiar with them, are used for software releases
-prior to stable or generally available releases. Examples of prereleases include
-development, alpha, beta, and release candidate releases. A prerelease may be
+prior to stable or generally available releases. Examples of pre-releases include
+development, alpha, beta, and release candidate releases. A pre-release may be
a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
-order of precedence, prereleases come before their associated releases. In this
+order of precedence, pre-releases come before their associated releases. In this
example `1.2.3-beta.1 < 1.2.3`.

-According to the Semantic Version specification prereleases may not be
+According to the Semantic Version specification, pre-releases may not be
API compliant with their release counterpart. It says,

> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.

-SemVer comparisons using constraints without a prerelease comparator will skip
-prerelease versions. For example, `>=1.2.3` will skip prereleases when looking
-at a list of releases while `>=1.2.3-0` will evaluate and find prereleases.
+SemVer's comparisons using constraints without a pre-release comparator will skip
+pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking
+at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases.

The reason for the `0` as a pre-release version in the example comparison is
because pre-releases can only contain ASCII alphanumerics and hyphens (along with
@@ -171,6 +168,9 @@ These look like:
* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`

+Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's
+parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`.

### Wildcards In Comparisons

The `x`, `X`, and `*` characters can be used as a wildcard character. This works
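To make the pre-release constraint rules in the README hunk above concrete, here is a small editorial sketch (not part of the diff) against the `semver/v3` API it describes:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	v := semver.MustParse("1.2.4-beta.1")

	// Without a pre-release comparator, constraint checks skip pre-releases.
	plain, _ := semver.NewConstraint(">=1.2.3")
	// Appending `-0` tells the constraint to evaluate pre-releases as well.
	withPre, _ := semver.NewConstraint(">=1.2.3-0")

	fmt.Println(plain.Check(v))   // false: pre-release excluded
	fmt.Println(withPre.Check(v)) // true: pre-release considered
}
```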
vendor/github.com/Masterminds/semver/v3/version.go (generated, vendored, 88 changes)
@@ -39,9 +39,11 @@ var (
|
||||
)
|
||||
|
||||
// semVerRegex is the regular expression used to parse a semantic version.
|
||||
const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
|
||||
`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
|
||||
`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
|
||||
// This is not the official regex from the semver spec. It has been modified to allow for loose handling
|
||||
// where versions like 2.1 are detected.
|
||||
const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` +
|
||||
`(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` +
|
||||
`(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?`
|
||||
|
||||
// Version represents a single semantic version.
|
||||
type Version struct {
|
||||
@@ -83,22 +85,23 @@ func StrictNewVersion(v string) (*Version, error) {
|
||||
original: v,
|
||||
}
|
||||
|
||||
// check for prerelease or build metadata
|
||||
var extra []string
|
||||
if strings.ContainsAny(parts[2], "-+") {
|
||||
// Start with the build metadata first as it needs to be on the right
|
||||
extra = strings.SplitN(parts[2], "+", 2)
|
||||
if len(extra) > 1 {
|
||||
// build metadata found
|
||||
sv.metadata = extra[1]
|
||||
parts[2] = extra[0]
|
||||
// Extract build metadata
|
||||
if strings.Contains(parts[2], "+") {
|
||||
extra := strings.SplitN(parts[2], "+", 2)
|
||||
sv.metadata = extra[1]
|
||||
parts[2] = extra[0]
|
||||
if err := validateMetadata(sv.metadata); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
extra = strings.SplitN(parts[2], "-", 2)
|
||||
if len(extra) > 1 {
|
||||
// prerelease found
|
||||
sv.pre = extra[1]
|
||||
parts[2] = extra[0]
|
||||
// Extract build prerelease
|
||||
if strings.Contains(parts[2], "-") {
|
||||
extra := strings.SplitN(parts[2], "-", 2)
|
||||
sv.pre = extra[1]
|
||||
parts[2] = extra[0]
|
||||
if err := validatePrerelease(sv.pre); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -114,7 +117,7 @@ func StrictNewVersion(v string) (*Version, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// Extract the major, minor, and patch elements onto the returned Version
|
||||
// Extract major, minor, and patch
|
||||
var err error
|
||||
sv.major, err = strconv.ParseUint(parts[0], 10, 64)
|
||||
if err != nil {
|
||||
@@ -131,23 +134,6 @@ func StrictNewVersion(v string) (*Version, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// No prerelease or build metadata found so returning now as a fastpath.
|
||||
if sv.pre == "" && sv.metadata == "" {
|
||||
return sv, nil
|
||||
}
|
||||
|
||||
if sv.pre != "" {
|
||||
if err = validatePrerelease(sv.pre); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if sv.metadata != "" {
|
||||
if err = validateMetadata(sv.metadata); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return sv, nil
|
||||
}
|
||||
|
||||
@@ -162,8 +148,8 @@ func NewVersion(v string) (*Version, error) {
|
||||
}
|
||||
|
||||
sv := &Version{
|
||||
metadata: m[8],
|
||||
pre: m[5],
|
||||
metadata: m[5],
|
||||
pre: m[4],
|
||||
original: v,
|
||||
}
|
||||
|
||||
@@ -174,7 +160,7 @@ func NewVersion(v string) (*Version, error) {
|
||||
}
|
||||
|
||||
if m[2] != "" {
|
||||
sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64)
|
||||
sv.minor, err = strconv.ParseUint(m[2], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error parsing version segment: %s", err)
|
||||
}
|
||||
@@ -183,7 +169,7 @@ func NewVersion(v string) (*Version, error) {
|
||||
}
|
||||
|
||||
if m[3] != "" {
|
||||
sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64)
|
||||
sv.patch, err = strconv.ParseUint(m[3], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error parsing version segment: %s", err)
|
||||
}
|
||||
@@ -381,15 +367,31 @@ func (v *Version) LessThan(o *Version) bool {
|
||||
return v.Compare(o) < 0
|
||||
}
|
||||
|
||||
// LessThanEqual tests if one version is less or equal than another one.
|
||||
func (v *Version) LessThanEqual(o *Version) bool {
|
||||
return v.Compare(o) <= 0
|
||||
}
|
||||
|
||||
// GreaterThan tests if one version is greater than another one.
|
||||
func (v *Version) GreaterThan(o *Version) bool {
|
||||
return v.Compare(o) > 0
|
||||
}
|
||||
|
||||
// GreaterThanEqual tests if one version is greater or equal than another one.
|
||||
func (v *Version) GreaterThanEqual(o *Version) bool {
|
||||
return v.Compare(o) >= 0
|
||||
}
|
||||
|
||||
// Equal tests if two versions are equal to each other.
|
||||
// Note, versions can be equal with different metadata since metadata
|
||||
// is not considered part of the comparable version.
|
||||
func (v *Version) Equal(o *Version) bool {
|
||||
if v == o {
|
||||
return true
|
||||
}
|
||||
if v == nil || o == nil {
|
||||
return false
|
||||
}
|
||||
return v.Compare(o) == 0
|
||||
}
|
||||
|
||||
@@ -612,7 +614,9 @@ func containsOnly(s string, comp string) bool {
|
||||
func validatePrerelease(p string) error {
|
||||
eparts := strings.Split(p, ".")
|
||||
for _, p := range eparts {
|
||||
if containsOnly(p, num) {
|
||||
if p == "" {
|
||||
return ErrInvalidMetadata
|
||||
} else if containsOnly(p, num) {
|
||||
if len(p) > 1 && p[0] == '0' {
|
||||
return ErrSegmentStartsZero
|
||||
}
|
||||
@@ -631,7 +635,9 @@ func validatePrerelease(p string) error {
|
||||
func validateMetadata(m string) error {
|
||||
eparts := strings.Split(m, ".")
|
||||
for _, p := range eparts {
|
||||
if !containsOnly(p, allowed) {
|
||||
if p == "" {
|
||||
return ErrInvalidMetadata
|
||||
} else if !containsOnly(p, allowed) {
|
||||
return ErrInvalidMetadata
|
||||
}
|
||||
}
|
||||
|
||||
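The version.go hunks above (semver 3.3.0) add `LessThanEqual` and `GreaterThanEqual` and note that `Equal` ignores build metadata. A minimal editorial sketch of those helpers, not taken from the diff:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	a := semver.MustParse("1.2.3")
	b := semver.MustParse("1.2.3+build.7")

	// Build metadata is not part of the comparable version, so a and b compare equal.
	fmt.Println(a.Equal(b))            // true
	fmt.Println(a.LessThanEqual(b))    // true
	fmt.Println(a.GreaterThanEqual(b)) // true
}
```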
vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go (generated, vendored, 2 changes)
@@ -29,7 +29,7 @@ const (
)

func (es EndpointState) String() string {
-	return [...]string{"Uninitialized", "Attached", "AttachedSharing", "Detached", "Degraded", "Destroyed"}[es]
+	return [...]string{"Uninitialized", "Created", "Attached", "AttachedSharing", "Detached", "Degraded", "Destroyed"}[es]
}

// HNSEndpoint represents a network endpoint in HNS
vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go (generated, vendored, 11 changes)
@@ -188,7 +188,7 @@ func Open(ctx context.Context, options *Options) (_ *JobObject, err error) {
			return nil, winapi.RtlNtStatusToDosError(status)
		}
	} else {
-		jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, 0, unicodeJobName.Buffer)
+		jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, false, unicodeJobName.Buffer)
		if err != nil {
			return nil, err
		}
@@ -523,12 +523,9 @@ func (job *JobObject) ApplyFileBinding(root, target string, readOnly bool) error
func isJobSilo(h windows.Handle) bool {
	// None of the information from the structure that this info class expects will be used, this is just used as
	// the call will fail if the job hasn't been upgraded to a silo so we can use this to tell when we open a job
-	// if it's a silo or not. Because none of the info matters simply define a dummy struct with the size that the call
-	// expects which is 16 bytes.
-	type isSiloObj struct {
-		_ [16]byte
-	}
-	var siloInfo isSiloObj
+	// if it's a silo or not. We still need to define the struct layout as expected by Win32, else the struct
+	// alignment might be different and the call will fail.
+	var siloInfo winapi.SILOOBJECT_BASIC_INFORMATION
	err := winapi.QueryInformationJobObject(
		h,
		winapi.JobObjectSiloBasicInformation,
vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go (generated, vendored, 2 changes)
@@ -6,7 +6,7 @@ import (
	"net"
	"os"

-	"github.com/containerd/errdefs"
+	errdefs "github.com/containerd/errdefs/pkg/errgrpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)
vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go (generated, vendored, 19 changes)
@@ -28,7 +28,7 @@ const (
// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights
const (
	JOB_OBJECT_QUERY = 0x0004
-	JOB_OBJECT_ALL_ACCESS = 0x1F001F
+	JOB_OBJECT_ALL_ACCESS = 0x1F003F
)

// IO limit flags
@@ -160,6 +160,21 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
	CompletionPort windows.Handle
}

+// typedef struct _SILOOBJECT_BASIC_INFORMATION {
+//     DWORD SiloId;
+//     DWORD SiloParentId;
+//     DWORD NumberOfProcesses;
+//     BOOLEAN IsInServerSilo;
+//     BYTE Reserved[3];
+// } SILOOBJECT_BASIC_INFORMATION, *PSILOOBJECT_BASIC_INFORMATION;
+type SILOOBJECT_BASIC_INFORMATION struct {
+	SiloID uint32
+	SiloParentID uint32
+	NumberOfProcesses uint32
+	IsInServerSilo bool
+	Reserved [3]uint8
+}

// BOOL IsProcessInJob(
//     HANDLE ProcessHandle,
//     HANDLE JobHandle,
@@ -184,7 +199,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
//     LPCWSTR lpName
// );
//
-//sys OpenJobObject(desiredAccess uint32, inheritHandle int32, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW
+//sys OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW

// DWORD SetIoRateControlInformationJobObject(
//     HANDLE hJob,
vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go (generated, vendored, 8 changes)
@@ -470,8 +470,12 @@ func LocalFree(ptr uintptr) {
	return
}

-func OpenJobObject(desiredAccess uint32, inheritHandle int32, lpName *uint16) (handle windows.Handle, err error) {
-	r0, _, e1 := syscall.SyscallN(procOpenJobObjectW.Addr(), uintptr(desiredAccess), uintptr(inheritHandle), uintptr(unsafe.Pointer(lpName)))
+func OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) {
+	var _p0 uint32
+	if inheritHandle {
+		_p0 = 1
+	}
+	r0, _, e1 := syscall.SyscallN(procOpenJobObjectW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(lpName)))
	handle = windows.Handle(r0)
	if handle == 0 {
		err = errnoErr(e1)
vendor/github.com/containerd/errdefs/errors.go (generated, vendored, 439 changes)
@@ -21,9 +21,6 @@
|
||||
//
|
||||
// To detect an error class, use the IsXXX functions to tell whether an error
|
||||
// is of a certain type.
|
||||
//
|
||||
// The functions ToGRPC and FromGRPC can be used to map server-side and
|
||||
// client-side errors to the correct types.
|
||||
package errdefs
|
||||
|
||||
import (
|
||||
@@ -36,57 +33,411 @@ import (
|
||||
// Packages should return errors of these types when they want to instruct a
|
||||
// client to take a particular action.
|
||||
//
|
||||
// For the most part, we just try to provide local grpc errors. Most conditions
|
||||
// map very well to those defined by grpc.
|
||||
// These errors map closely to grpc errors.
|
||||
var (
|
||||
ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping.
|
||||
ErrInvalidArgument = errors.New("invalid argument")
|
||||
ErrNotFound = errors.New("not found")
|
||||
ErrAlreadyExists = errors.New("already exists")
|
||||
ErrFailedPrecondition = errors.New("failed precondition")
|
||||
ErrUnavailable = errors.New("unavailable")
|
||||
ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented
|
||||
ErrUnknown = errUnknown{}
|
||||
ErrInvalidArgument = errInvalidArgument{}
|
||||
ErrNotFound = errNotFound{}
|
||||
ErrAlreadyExists = errAlreadyExists{}
|
||||
ErrPermissionDenied = errPermissionDenied{}
|
||||
ErrResourceExhausted = errResourceExhausted{}
|
||||
ErrFailedPrecondition = errFailedPrecondition{}
|
||||
ErrConflict = errConflict{}
|
||||
ErrNotModified = errNotModified{}
|
||||
ErrAborted = errAborted{}
|
||||
ErrOutOfRange = errOutOfRange{}
|
||||
ErrNotImplemented = errNotImplemented{}
|
||||
ErrInternal = errInternal{}
|
||||
ErrUnavailable = errUnavailable{}
|
||||
ErrDataLoss = errDataLoss{}
|
||||
ErrUnauthenticated = errUnauthorized{}
|
||||
)
|
||||
|
||||
// IsInvalidArgument returns true if the error is due to an invalid argument
|
||||
func IsInvalidArgument(err error) bool {
|
||||
return errors.Is(err, ErrInvalidArgument)
|
||||
}
|
||||
|
||||
// IsNotFound returns true if the error is due to a missing object
|
||||
func IsNotFound(err error) bool {
|
||||
return errors.Is(err, ErrNotFound)
|
||||
}
|
||||
|
||||
// IsAlreadyExists returns true if the error is due to an already existing
|
||||
// metadata item
|
||||
func IsAlreadyExists(err error) bool {
|
||||
return errors.Is(err, ErrAlreadyExists)
|
||||
}
|
||||
|
||||
// IsFailedPrecondition returns true if an operation could not proceed to the
|
||||
// lack of a particular condition
|
||||
func IsFailedPrecondition(err error) bool {
|
||||
return errors.Is(err, ErrFailedPrecondition)
|
||||
}
|
||||
|
||||
// IsUnavailable returns true if the error is due to a resource being unavailable
|
||||
func IsUnavailable(err error) bool {
|
||||
return errors.Is(err, ErrUnavailable)
|
||||
}
|
||||
|
||||
// IsNotImplemented returns true if the error is due to not being implemented
|
||||
func IsNotImplemented(err error) bool {
|
||||
return errors.Is(err, ErrNotImplemented)
|
||||
// cancelled maps to Moby's "ErrCancelled"
|
||||
type cancelled interface {
|
||||
Cancelled()
|
||||
}
|
||||
|
||||
// IsCanceled returns true if the error is due to `context.Canceled`.
|
||||
func IsCanceled(err error) bool {
|
||||
return errors.Is(err, context.Canceled)
|
||||
return errors.Is(err, context.Canceled) || isInterface[cancelled](err)
|
||||
}
|
||||
|
||||
type errUnknown struct{}
|
||||
|
||||
func (errUnknown) Error() string { return "unknown" }
|
||||
|
||||
func (errUnknown) Unknown() {}
|
||||
|
||||
func (e errUnknown) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// unknown maps to Moby's "ErrUnknown"
|
||||
type unknown interface {
|
||||
Unknown()
|
||||
}
|
||||
|
||||
// IsUnknown returns true if the error is due to an unknown error,
|
||||
// unhandled condition or unexpected response.
|
||||
func IsUnknown(err error) bool {
|
||||
return errors.Is(err, errUnknown{}) || isInterface[unknown](err)
|
||||
}
|
||||
|
||||
type errInvalidArgument struct{}
|
||||
|
||||
func (errInvalidArgument) Error() string { return "invalid argument" }
|
||||
|
||||
func (errInvalidArgument) InvalidParameter() {}
|
||||
|
||||
func (e errInvalidArgument) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// invalidParameter maps to Moby's "ErrInvalidParameter"
|
||||
type invalidParameter interface {
|
||||
InvalidParameter()
|
||||
}
|
||||
|
||||
// IsInvalidArgument returns true if the error is due to an invalid argument
|
||||
func IsInvalidArgument(err error) bool {
|
||||
return errors.Is(err, ErrInvalidArgument) || isInterface[invalidParameter](err)
|
||||
}
|
||||
|
||||
// deadlineExceed maps to Moby's "ErrDeadline"
|
||||
type deadlineExceeded interface {
|
||||
DeadlineExceeded()
|
||||
}
|
||||
|
||||
// IsDeadlineExceeded returns true if the error is due to
|
||||
// `context.DeadlineExceeded`.
|
||||
func IsDeadlineExceeded(err error) bool {
|
||||
return errors.Is(err, context.DeadlineExceeded)
|
||||
return errors.Is(err, context.DeadlineExceeded) || isInterface[deadlineExceeded](err)
|
||||
}
|
||||
|
||||
type errNotFound struct{}
|
||||
|
||||
func (errNotFound) Error() string { return "not found" }
|
||||
|
||||
func (errNotFound) NotFound() {}
|
||||
|
||||
func (e errNotFound) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// notFound maps to Moby's "ErrNotFound"
|
||||
type notFound interface {
|
||||
NotFound()
|
||||
}
|
||||
|
||||
// IsNotFound returns true if the error is due to a missing object
|
||||
func IsNotFound(err error) bool {
|
||||
return errors.Is(err, ErrNotFound) || isInterface[notFound](err)
|
||||
}
|
||||
|
||||
type errAlreadyExists struct{}
|
||||
|
||||
func (errAlreadyExists) Error() string { return "already exists" }
|
||||
|
||||
func (errAlreadyExists) AlreadyExists() {}
|
||||
|
||||
func (e errAlreadyExists) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
type alreadyExists interface {
|
||||
AlreadyExists()
|
||||
}
|
||||
|
||||
// IsAlreadyExists returns true if the error is due to an already existing
|
||||
// metadata item
|
||||
func IsAlreadyExists(err error) bool {
|
||||
return errors.Is(err, ErrAlreadyExists) || isInterface[alreadyExists](err)
|
||||
}
|
||||
|
||||
type errPermissionDenied struct{}
|
||||
|
||||
func (errPermissionDenied) Error() string { return "permission denied" }
|
||||
|
||||
func (errPermissionDenied) Forbidden() {}
|
||||
|
||||
func (e errPermissionDenied) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// forbidden maps to Moby's "ErrForbidden"
|
||||
type forbidden interface {
|
||||
Forbidden()
|
||||
}
|
||||
|
||||
// IsPermissionDenied returns true if the error is due to permission denied
|
||||
// or forbidden (403) response
|
||||
func IsPermissionDenied(err error) bool {
|
||||
return errors.Is(err, ErrPermissionDenied) || isInterface[forbidden](err)
|
||||
}
|
||||
|
||||
type errResourceExhausted struct{}
|
||||
|
||||
func (errResourceExhausted) Error() string { return "resource exhausted" }
|
||||
|
||||
func (errResourceExhausted) ResourceExhausted() {}
|
||||
|
||||
func (e errResourceExhausted) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
type resourceExhausted interface {
|
||||
ResourceExhausted()
|
||||
}
|
||||
|
||||
// IsResourceExhausted returns true if the error is due to
|
||||
// a lack of resources or too many attempts.
|
||||
func IsResourceExhausted(err error) bool {
|
||||
return errors.Is(err, errResourceExhausted{}) || isInterface[resourceExhausted](err)
|
||||
}
|
||||
|
||||
type errFailedPrecondition struct{}
|
||||
|
||||
func (e errFailedPrecondition) Error() string { return "failed precondition" }
|
||||
|
||||
func (errFailedPrecondition) FailedPrecondition() {}
|
||||
|
||||
func (e errFailedPrecondition) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
type failedPrecondition interface {
|
||||
FailedPrecondition()
|
||||
}
|
||||
|
||||
// IsFailedPrecondition returns true if an operation could not proceed due to
|
||||
// the lack of a particular condition
|
||||
func IsFailedPrecondition(err error) bool {
|
||||
return errors.Is(err, errFailedPrecondition{}) || isInterface[failedPrecondition](err)
|
||||
}
|
||||
|
||||
type errConflict struct{}
|
||||
|
||||
func (errConflict) Error() string { return "conflict" }
|
||||
|
||||
func (errConflict) Conflict() {}
|
||||
|
||||
func (e errConflict) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// conflict maps to Moby's "ErrConflict"
|
||||
type conflict interface {
|
||||
Conflict()
|
||||
}
|
||||
|
||||
// IsConflict returns true if an operation could not proceed due to
|
||||
// a conflict.
|
||||
func IsConflict(err error) bool {
|
||||
return errors.Is(err, errConflict{}) || isInterface[conflict](err)
|
||||
}
|
||||
|
||||
type errNotModified struct{}
|
||||
|
||||
func (errNotModified) Error() string { return "not modified" }
|
||||
|
||||
func (errNotModified) NotModified() {}
|
||||
|
||||
func (e errNotModified) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// notModified maps to Moby's "ErrNotModified"
|
||||
type notModified interface {
|
||||
NotModified()
|
||||
}
|
||||
|
||||
// IsNotModified returns true if an operation could not proceed due
|
||||
// to an object not modified from a previous state.
|
||||
func IsNotModified(err error) bool {
|
||||
return errors.Is(err, errNotModified{}) || isInterface[notModified](err)
|
||||
}
|
||||
|
||||
type errAborted struct{}
|
||||
|
||||
func (errAborted) Error() string { return "aborted" }
|
||||
|
||||
func (errAborted) Aborted() {}
|
||||
|
||||
func (e errAborted) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
type aborted interface {
|
||||
Aborted()
|
||||
}
|
||||
|
||||
// IsAborted returns true if an operation was aborted.
|
||||
func IsAborted(err error) bool {
|
||||
return errors.Is(err, errAborted{}) || isInterface[aborted](err)
|
||||
}
|
||||
|
||||
type errOutOfRange struct{}
|
||||
|
||||
func (errOutOfRange) Error() string { return "out of range" }
|
||||
|
||||
func (errOutOfRange) OutOfRange() {}
|
||||
|
||||
func (e errOutOfRange) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
type outOfRange interface {
|
||||
OutOfRange()
|
||||
}
|
||||
|
||||
// IsOutOfRange returns true if an operation could not proceed due
|
||||
// to data being out of the expected range.
|
||||
func IsOutOfRange(err error) bool {
|
||||
return errors.Is(err, errOutOfRange{}) || isInterface[outOfRange](err)
|
||||
}
|
||||
|
||||
type errNotImplemented struct{}
|
||||
|
||||
func (errNotImplemented) Error() string { return "not implemented" }
|
||||
|
||||
func (errNotImplemented) NotImplemented() {}
|
||||
|
||||
func (e errNotImplemented) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// notImplemented maps to Moby's "ErrNotImplemented"
|
||||
type notImplemented interface {
|
||||
NotImplemented()
|
||||
}
|
||||
|
||||
// IsNotImplemented returns true if the error is due to not being implemented
|
||||
func IsNotImplemented(err error) bool {
|
||||
return errors.Is(err, errNotImplemented{}) || isInterface[notImplemented](err)
|
||||
}
|
||||
|
||||
type errInternal struct{}
|
||||
|
||||
func (errInternal) Error() string { return "internal" }
|
||||
|
||||
func (errInternal) System() {}
|
||||
|
||||
func (e errInternal) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// system maps to Moby's "ErrSystem"
|
||||
type system interface {
|
||||
System()
|
||||
}
|
||||
|
||||
// IsInternal returns true if the error returns to an internal or system error
|
||||
func IsInternal(err error) bool {
|
||||
return errors.Is(err, errInternal{}) || isInterface[system](err)
|
||||
}
|
||||
|
||||
type errUnavailable struct{}
|
||||
|
||||
func (errUnavailable) Error() string { return "unavailable" }
|
||||
|
||||
func (errUnavailable) Unavailable() {}
|
||||
|
||||
func (e errUnavailable) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// unavailable maps to Moby's "ErrUnavailable"
|
||||
type unavailable interface {
|
||||
Unavailable()
|
||||
}
|
||||
|
||||
// IsUnavailable returns true if the error is due to a resource being unavailable
|
||||
func IsUnavailable(err error) bool {
|
||||
return errors.Is(err, errUnavailable{}) || isInterface[unavailable](err)
|
||||
}
|
||||
|
||||
type errDataLoss struct{}
|
||||
|
||||
func (errDataLoss) Error() string { return "data loss" }
|
||||
|
||||
func (errDataLoss) DataLoss() {}
|
||||
|
||||
func (e errDataLoss) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// dataLoss maps to Moby's "ErrDataLoss"
|
||||
type dataLoss interface {
|
||||
DataLoss()
|
||||
}
|
||||
|
||||
// IsDataLoss returns true if data during an operation was lost or corrupted
|
||||
func IsDataLoss(err error) bool {
|
||||
return errors.Is(err, errDataLoss{}) || isInterface[dataLoss](err)
|
||||
}
|
||||
|
||||
type errUnauthorized struct{}
|
||||
|
||||
func (errUnauthorized) Error() string { return "unauthorized" }
|
||||
|
||||
func (errUnauthorized) Unauthorized() {}
|
||||
|
||||
func (e errUnauthorized) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// unauthorized maps to Moby's "ErrUnauthorized"
|
||||
type unauthorized interface {
|
||||
Unauthorized()
|
||||
}
|
||||
|
||||
// IsUnauthorized returns true if the error indicates that the user was
|
||||
// unauthenticated or unauthorized.
|
||||
func IsUnauthorized(err error) bool {
|
||||
return errors.Is(err, errUnauthorized{}) || isInterface[unauthorized](err)
|
||||
}
|
||||
|
||||
func isInterface[T any](err error) bool {
|
||||
for {
|
||||
switch x := err.(type) {
|
||||
case T:
|
||||
return true
|
||||
case customMessage:
|
||||
err = x.err
|
||||
case interface{ Unwrap() error }:
|
||||
err = x.Unwrap()
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
case interface{ Unwrap() []error }:
|
||||
for _, err := range x.Unwrap() {
|
||||
if isInterface[T](err) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// customMessage is used to provide a defined error with a custom message.
|
||||
// The message is not wrapped but can be compared by the `Is(error) bool` interface.
|
||||
type customMessage struct {
|
||||
err error
|
||||
msg string
|
||||
}
|
||||
|
||||
func (c customMessage) Is(err error) bool {
|
||||
return c.err == err
|
||||
}
|
||||
|
||||
func (c customMessage) As(target any) bool {
|
||||
return errors.As(c.err, target)
|
||||
}
|
||||
|
||||
func (c customMessage) Error() string {
|
||||
return c.msg
|
||||
}
|
||||
|
||||
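The errors.go rewrite above replaces plain sentinel errors with typed values, but the exported `Err*` variables and `Is*` helpers keep working with ordinary `errors.Is`-style wrapping. A brief editorial sketch (not from the diff; the wrapped message is illustrative):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/containerd/errdefs"
)

func main() {
	// Wrap the sentinel; the helper still detects it through the wrap chain.
	err := fmt.Errorf("looking up image %q: %w", "example/missing", errdefs.ErrNotFound)

	fmt.Println(errdefs.IsNotFound(err))             // true
	fmt.Println(errors.Is(err, errdefs.ErrNotFound)) // true
	fmt.Println(errdefs.IsInvalidArgument(err))      // false
}
```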
vendor/github.com/containerd/errdefs/grpc.go (generated, vendored, 147 deletions)
@@ -1,147 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package errdefs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// ToGRPC will attempt to map the backend containerd error into a grpc error,
|
||||
// using the original error message as a description.
|
||||
//
|
||||
// Further information may be extracted from certain errors depending on their
|
||||
// type.
|
||||
//
|
||||
// If the error is unmapped, the original error will be returned to be handled
|
||||
// by the regular grpc error handling stack.
|
||||
func ToGRPC(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if isGRPCError(err) {
|
||||
// error has already been mapped to grpc
|
||||
return err
|
||||
}
|
||||
|
||||
switch {
|
||||
case IsInvalidArgument(err):
|
||||
return status.Errorf(codes.InvalidArgument, err.Error())
|
||||
case IsNotFound(err):
|
||||
return status.Errorf(codes.NotFound, err.Error())
|
||||
case IsAlreadyExists(err):
|
||||
return status.Errorf(codes.AlreadyExists, err.Error())
|
||||
case IsFailedPrecondition(err):
|
||||
return status.Errorf(codes.FailedPrecondition, err.Error())
|
||||
case IsUnavailable(err):
|
||||
return status.Errorf(codes.Unavailable, err.Error())
|
||||
case IsNotImplemented(err):
|
||||
return status.Errorf(codes.Unimplemented, err.Error())
|
||||
case IsCanceled(err):
|
||||
return status.Errorf(codes.Canceled, err.Error())
|
||||
case IsDeadlineExceeded(err):
|
||||
return status.Errorf(codes.DeadlineExceeded, err.Error())
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// ToGRPCf maps the error to grpc error codes, assembling the formatting string
|
||||
// and combining it with the target error string.
|
||||
//
|
||||
// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
|
||||
func ToGRPCf(err error, format string, args ...interface{}) error {
|
||||
return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
|
||||
}
|
||||
|
||||
// FromGRPC returns the underlying error from a grpc service based on the grpc error code
|
||||
func FromGRPC(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var cls error // divide these into error classes, becomes the cause
|
||||
|
||||
switch code(err) {
|
||||
case codes.InvalidArgument:
|
||||
cls = ErrInvalidArgument
|
||||
case codes.AlreadyExists:
|
||||
cls = ErrAlreadyExists
|
||||
case codes.NotFound:
|
||||
cls = ErrNotFound
|
||||
case codes.Unavailable:
|
||||
cls = ErrUnavailable
|
||||
case codes.FailedPrecondition:
|
||||
cls = ErrFailedPrecondition
|
||||
case codes.Unimplemented:
|
||||
cls = ErrNotImplemented
|
||||
case codes.Canceled:
|
||||
cls = context.Canceled
|
||||
case codes.DeadlineExceeded:
|
||||
cls = context.DeadlineExceeded
|
||||
default:
|
||||
cls = ErrUnknown
|
||||
}
|
||||
|
||||
msg := rebaseMessage(cls, err)
|
||||
if msg != "" {
|
||||
err = fmt.Errorf("%s: %w", msg, cls)
|
||||
} else {
|
||||
err = cls
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// rebaseMessage removes the repeats for an error at the end of an error
|
||||
// string. This will happen when taking an error over grpc then remapping it.
|
||||
//
|
||||
// Effectively, we just remove the string of cls from the end of err if it
|
||||
// appears there.
|
||||
func rebaseMessage(cls error, err error) string {
|
||||
desc := errDesc(err)
|
||||
clss := cls.Error()
|
||||
if desc == clss {
|
||||
return ""
|
||||
}
|
||||
|
||||
return strings.TrimSuffix(desc, ": "+clss)
|
||||
}
|
||||
|
||||
func isGRPCError(err error) bool {
|
||||
_, ok := status.FromError(err)
|
||||
return ok
|
||||
}
|
||||
|
||||
func code(err error) codes.Code {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
return s.Code()
|
||||
}
|
||||
return codes.Unknown
|
||||
}
|
||||
|
||||
func errDesc(err error) string {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
return s.Message()
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
vendor/github.com/containerd/errdefs/pkg/LICENSE (generated, vendored, new file, 191 lines)
@@ -0,0 +1,191 @@
[Standard Apache License, Version 2.0 text (191 lines), followed by the "Copyright The containerd Authors" notice and license grant — full boilerplate omitted.]
353 vendor/github.com/containerd/errdefs/pkg/errgrpc/grpc.go generated vendored Normal file
@@ -0,0 +1,353 @@
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package errgrpc provides utility functions for translating errors to
// and from a gRPC context.
//
// The functions ToGRPC and ToNative can be used to map server-side and
// client-side errors to the correct types.
package errgrpc

import (
    "context"
    "errors"
    "fmt"
    "reflect"
    "strconv"
    "strings"

    spb "google.golang.org/genproto/googleapis/rpc/status"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
    "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/protoadapt"
    "google.golang.org/protobuf/types/known/anypb"

    "github.com/containerd/typeurl/v2"

    "github.com/containerd/errdefs"
    "github.com/containerd/errdefs/pkg/internal/cause"
    "github.com/containerd/errdefs/pkg/internal/types"
)

// ToGRPC will attempt to map the error into a grpc error, from the error types
// defined in the errdefs package and attempting to preserve the original
// description. Any type which does not resolve to a defined error type will
// be assigned the unknown error code.
//
// Further information may be extracted from certain errors depending on their
// type. The grpc error details will be used to attempt to preserve as much of
// the error structures and types as possible.
//
// Errors which can be marshaled using protobuf or typeurl will be considered
// for including as GRPC error details.
// Additionally, use the following interfaces in errors to preserve custom types:
//
//    WrapError(error) error - Used to wrap the previous error
//    JoinErrors(...error) error - Used to join all previous errors
//    CollapseError() - Used for errors which carry information but
//    should not have their error message shown.
func ToGRPC(err error) error {
    if err == nil {
        return nil
    }

    if _, ok := status.FromError(err); ok {
        // error has already been mapped to grpc
        return err
    }
    st := statusFromError(err)
    if st != nil {
        if details := errorDetails(err, false); len(details) > 0 {
            if ds, _ := st.WithDetails(details...); ds != nil {
                st = ds
            }
        }
        err = st.Err()
    }
    return err
}

func statusFromError(err error) *status.Status {
    switch errdefs.Resolve(err) {
    case errdefs.ErrInvalidArgument:
        return status.New(codes.InvalidArgument, err.Error())
    case errdefs.ErrNotFound:
        return status.New(codes.NotFound, err.Error())
    case errdefs.ErrAlreadyExists:
        return status.New(codes.AlreadyExists, err.Error())
    case errdefs.ErrPermissionDenied:
        return status.New(codes.PermissionDenied, err.Error())
    case errdefs.ErrResourceExhausted:
        return status.New(codes.ResourceExhausted, err.Error())
    case errdefs.ErrFailedPrecondition, errdefs.ErrConflict, errdefs.ErrNotModified:
        return status.New(codes.FailedPrecondition, err.Error())
    case errdefs.ErrAborted:
        return status.New(codes.Aborted, err.Error())
    case errdefs.ErrOutOfRange:
        return status.New(codes.OutOfRange, err.Error())
    case errdefs.ErrNotImplemented:
        return status.New(codes.Unimplemented, err.Error())
    case errdefs.ErrInternal:
        return status.New(codes.Internal, err.Error())
    case errdefs.ErrUnavailable:
        return status.New(codes.Unavailable, err.Error())
    case errdefs.ErrDataLoss:
        return status.New(codes.DataLoss, err.Error())
    case errdefs.ErrUnauthenticated:
        return status.New(codes.Unauthenticated, err.Error())
    case context.DeadlineExceeded:
        return status.New(codes.DeadlineExceeded, err.Error())
    case context.Canceled:
        return status.New(codes.Canceled, err.Error())
    case errdefs.ErrUnknown:
        return status.New(codes.Unknown, err.Error())
    }
    return nil
}

// errorDetails returns an array of errors which make up the provided error.
// If firstIncluded is true, then all encodable errors will be used, otherwise
// the first error in an error list will not be used, to account for the
// base status error to which details are added via wrap or join.
//
// The errors are ordered in a way that they can be applied in order by either
// wrapping or joining the errors to recreate an error with the same structure
// when `WrapError` and `JoinErrors` interfaces are used.
//
// The intent is that when re-applying the errors to create a single error, the
// results of calls to `Error()`, `errors.Is`, `errors.As`, and "%+v" formatting
// are the same as the original error.
func errorDetails(err error, firstIncluded bool) []protoadapt.MessageV1 {
    switch uerr := err.(type) {
    case interface{ Unwrap() error }:
        details := errorDetails(uerr.Unwrap(), firstIncluded)

        // If the type is able to wrap, then include if proto
        if _, ok := err.(interface{ WrapError(error) error }); ok {
            // Get proto message
            if protoErr := toProtoMessage(err); protoErr != nil {
                details = append(details, protoErr)
            }
        }

        return details
    case interface{ Unwrap() []error }:
        var details []protoadapt.MessageV1
        for i, e := range uerr.Unwrap() {
            details = append(details, errorDetails(e, firstIncluded || i > 0)...)
        }

        if _, ok := err.(interface{ JoinErrors(...error) error }); ok {
            // Get proto message
            if protoErr := toProtoMessage(err); protoErr != nil {
                details = append(details, protoErr)
            }
        }
        return details
    }

    if firstIncluded {
        if protoErr := toProtoMessage(err); protoErr != nil {
            return []protoadapt.MessageV1{protoErr}
        }
        if gs, ok := status.FromError(ToGRPC(err)); ok {
            return []protoadapt.MessageV1{gs.Proto()}
        }
        // TODO: Else include unknown extra error type?
    }

    return nil
}

func toProtoMessage(err error) protoadapt.MessageV1 {
    // Do not double encode proto messages, otherwise use Any
    if pm, ok := err.(protoadapt.MessageV1); ok {
        return pm
    }
    if pm, ok := err.(proto.Message); ok {
        return protoadapt.MessageV1Of(pm)
    }

    if reflect.TypeOf(err).Kind() == reflect.Ptr {
        a, aerr := typeurl.MarshalAny(err)
        if aerr == nil {
            return &anypb.Any{
                TypeUrl: a.GetTypeUrl(),
                Value:   a.GetValue(),
            }
        }
    }
    return nil
}

// ToGRPCf maps the error to grpc error codes, assembling the formatting string
// and combining it with the target error string.
//
// This is equivalent to grpc.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
func ToGRPCf(err error, format string, args ...interface{}) error {
    return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
}

// ToNative returns the underlying error from a grpc service based on the grpc
// error code. The grpc details are used to wrap the error in more context
// or support multiple errors.
func ToNative(err error) error {
    if err == nil {
        return nil
    }

    s, isGRPC := status.FromError(err)

    var (
        desc string
        code codes.Code
    )

    if isGRPC {
        desc = s.Message()
        code = s.Code()
    } else {
        desc = err.Error()
        code = codes.Unknown
    }

    var cls error // divide these into error classes, becomes the cause

    switch code {
    case codes.InvalidArgument:
        cls = errdefs.ErrInvalidArgument
    case codes.AlreadyExists:
        cls = errdefs.ErrAlreadyExists
    case codes.NotFound:
        cls = errdefs.ErrNotFound
    case codes.Unavailable:
        cls = errdefs.ErrUnavailable
    case codes.FailedPrecondition:
        // TODO: Has suffix is not sufficient for conflict and not modified
        // Message should start with ": " or be at beginning of a line
        // Message should end with ": " or be at the end of a line
        // Compile a regex
        if desc == errdefs.ErrConflict.Error() || strings.HasSuffix(desc, ": "+errdefs.ErrConflict.Error()) {
            cls = errdefs.ErrConflict
        } else if desc == errdefs.ErrNotModified.Error() || strings.HasSuffix(desc, ": "+errdefs.ErrNotModified.Error()) {
            cls = errdefs.ErrNotModified
        } else {
            cls = errdefs.ErrFailedPrecondition
        }
    case codes.Unimplemented:
        cls = errdefs.ErrNotImplemented
    case codes.Canceled:
        cls = context.Canceled
    case codes.DeadlineExceeded:
        cls = context.DeadlineExceeded
    case codes.Aborted:
        cls = errdefs.ErrAborted
    case codes.Unauthenticated:
        cls = errdefs.ErrUnauthenticated
    case codes.PermissionDenied:
        cls = errdefs.ErrPermissionDenied
    case codes.Internal:
        cls = errdefs.ErrInternal
    case codes.DataLoss:
        cls = errdefs.ErrDataLoss
    case codes.OutOfRange:
        cls = errdefs.ErrOutOfRange
    case codes.ResourceExhausted:
        cls = errdefs.ErrResourceExhausted
    default:
        if idx := strings.LastIndex(desc, cause.UnexpectedStatusPrefix); idx > 0 {
            if status, uerr := strconv.Atoi(desc[idx+len(cause.UnexpectedStatusPrefix):]); uerr == nil && status >= 200 && status < 600 {
                cls = cause.ErrUnexpectedStatus{Status: status}
            }
        }
        if cls == nil {
            cls = errdefs.ErrUnknown
        }
    }

    msg := rebaseMessage(cls, desc)
    if msg == "" {
        err = cls
    } else if msg != desc {
        err = fmt.Errorf("%s: %w", msg, cls)
    } else if wm, ok := cls.(interface{ WithMessage(string) error }); ok {
        err = wm.WithMessage(msg)
    } else {
        err = fmt.Errorf("%s: %w", msg, cls)
    }

    if isGRPC {
        errs := []error{err}
        for _, a := range s.Details() {
            var derr error

            // First decode error if needed
            if s, ok := a.(*spb.Status); ok {
                derr = ToNative(status.ErrorProto(s))
            } else if e, ok := a.(error); ok {
                derr = e
            } else if dany, ok := a.(typeurl.Any); ok {
                i, uerr := typeurl.UnmarshalAny(dany)
                if uerr == nil {
                    if e, ok = i.(error); ok {
                        derr = e
                    } else {
                        derr = fmt.Errorf("non-error unmarshalled detail: %v", i)
                    }
                } else {
                    derr = fmt.Errorf("error of type %q with failure to unmarshal: %v", dany.GetTypeUrl(), uerr)
                }
            } else {
                derr = fmt.Errorf("non-error detail: %v", a)
            }

            switch werr := derr.(type) {
            case interface{ WrapError(error) error }:
                errs[len(errs)-1] = werr.WrapError(errs[len(errs)-1])
            case interface{ JoinErrors(...error) error }:
                // TODO: Consider whether this should support joining a subset
                errs[0] = werr.JoinErrors(errs...)
            case interface{ CollapseError() }:
                errs[len(errs)-1] = types.CollapsedError(errs[len(errs)-1], derr)
            default:
                errs = append(errs, derr)
            }

        }
        if len(errs) > 1 {
            err = errors.Join(errs...)
        } else {
            err = errs[0]
        }
    }

    return err
}

// rebaseMessage removes the repeats for an error at the end of an error
// string. This will happen when taking an error over grpc then remapping it.
//
// Effectively, we just remove the string of cls from the end of err if it
// appears there.
func rebaseMessage(cls error, desc string) string {
    clss := cls.Error()
    if desc == clss {
        return ""
    }

    return strings.TrimSuffix(desc, ": "+clss)
}
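A minimal sketch of how a caller might exercise the ToGRPC/ToNative pair above. The import paths are the upstream ones used by this vendored file; the image reference is made up for illustration.

package main

import (
    "fmt"

    "github.com/containerd/errdefs"
    "github.com/containerd/errdefs/pkg/errgrpc"
)

func main() {
    // Server side: a typed errdefs error is translated for gRPC transport.
    orig := fmt.Errorf("image %q: %w", "example.com/foo:latest", errdefs.ErrNotFound)
    grpcErr := errgrpc.ToGRPC(orig) // carries codes.NotFound plus the original message

    // Client side: the gRPC status is mapped back to a native errdefs error.
    native := errgrpc.ToNative(grpcErr)
    fmt.Println(errdefs.IsNotFound(native)) // true
    fmt.Println(native)                     // image "example.com/foo:latest": not found
}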
33 vendor/github.com/containerd/errdefs/pkg/internal/cause/cause.go generated vendored Normal file
@@ -0,0 +1,33 @@
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package cause is used to define root causes for errors
// common to errors packages like grpc and http.
package cause

import "fmt"

type ErrUnexpectedStatus struct {
    Status int
}

const UnexpectedStatusPrefix = "unexpected status "

func (e ErrUnexpectedStatus) Error() string {
    return fmt.Sprintf("%s%d", UnexpectedStatusPrefix, e.Status)
}

func (ErrUnexpectedStatus) Unknown() {}
57 vendor/github.com/containerd/errdefs/pkg/internal/types/collapsible.go generated vendored Normal file
@@ -0,0 +1,57 @@
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package types

import "fmt"

// CollapsibleError indicates the error should be collapsed
type CollapsibleError interface {
    CollapseError()
}

// CollapsedError returns a new error with the collapsed
// error returned on unwrap or when formatted with "%+v"
func CollapsedError(err error, collapsed ...error) error {
    return collapsedError{err, collapsed}
}

type collapsedError struct {
    error
    collapsed []error
}

func (c collapsedError) Unwrap() []error {
    return append([]error{c.error}, c.collapsed...)
}

func (c collapsedError) Format(s fmt.State, verb rune) {
    switch verb {
    case 'v':
        if s.Flag('+') {
            fmt.Fprintf(s, "%+v", c.error)
            for _, err := range c.collapsed {
                fmt.Fprintf(s, "\n%+v", err)
            }
            return
        }
        fallthrough
    case 's':
        fmt.Fprint(s, c.Error())
    case 'q':
        fmt.Fprintf(s, "%q", c.Error())
    }
}
147 vendor/github.com/containerd/errdefs/resolve.go generated vendored Normal file
@@ -0,0 +1,147 @@
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package errdefs

import "context"

// Resolve returns the first error found in the error chain which matches an
// error defined in this package or context error. A raw, unwrapped error is
// returned or ErrUnknown if no matching error is found.
//
// This is useful for determining a response code based on the outermost wrapped
// error rather than the original cause. For example, a not found error deep
// in the code may be wrapped as an invalid argument. When determining status
// code from Is* functions, the depth or ordering of the error is not
// considered.
//
// The search order is depth first, a wrapped error returned from any part of
// the chain from `Unwrap() error` will be returned before any joined errors
// as returned by `Unwrap() []error`.
func Resolve(err error) error {
    if err == nil {
        return nil
    }
    err = firstError(err)
    if err == nil {
        err = ErrUnknown
    }
    return err
}

func firstError(err error) error {
    for {
        switch err {
        case ErrUnknown,
            ErrInvalidArgument,
            ErrNotFound,
            ErrAlreadyExists,
            ErrPermissionDenied,
            ErrResourceExhausted,
            ErrFailedPrecondition,
            ErrConflict,
            ErrNotModified,
            ErrAborted,
            ErrOutOfRange,
            ErrNotImplemented,
            ErrInternal,
            ErrUnavailable,
            ErrDataLoss,
            ErrUnauthenticated,
            context.DeadlineExceeded,
            context.Canceled:
            return err
        }
        switch e := err.(type) {
        case customMessage:
            err = e.err
        case unknown:
            return ErrUnknown
        case invalidParameter:
            return ErrInvalidArgument
        case notFound:
            return ErrNotFound
        case alreadyExists:
            return ErrAlreadyExists
        case forbidden:
            return ErrPermissionDenied
        case resourceExhausted:
            return ErrResourceExhausted
        case failedPrecondition:
            return ErrFailedPrecondition
        case conflict:
            return ErrConflict
        case notModified:
            return ErrNotModified
        case aborted:
            return ErrAborted
        case errOutOfRange:
            return ErrOutOfRange
        case notImplemented:
            return ErrNotImplemented
        case system:
            return ErrInternal
        case unavailable:
            return ErrUnavailable
        case dataLoss:
            return ErrDataLoss
        case unauthorized:
            return ErrUnauthenticated
        case deadlineExceeded:
            return context.DeadlineExceeded
        case cancelled:
            return context.Canceled
        case interface{ Unwrap() error }:
            err = e.Unwrap()
            if err == nil {
                return nil
            }
        case interface{ Unwrap() []error }:
            for _, ue := range e.Unwrap() {
                if fe := firstError(ue); fe != nil {
                    return fe
                }
            }
            return nil
        case interface{ Is(error) bool }:
            for _, target := range []error{ErrUnknown,
                ErrInvalidArgument,
                ErrNotFound,
                ErrAlreadyExists,
                ErrPermissionDenied,
                ErrResourceExhausted,
                ErrFailedPrecondition,
                ErrConflict,
                ErrNotModified,
                ErrAborted,
                ErrOutOfRange,
                ErrNotImplemented,
                ErrInternal,
                ErrUnavailable,
                ErrDataLoss,
                ErrUnauthenticated,
                context.DeadlineExceeded,
                context.Canceled} {
                if e.Is(target) {
                    return target
                }
            }
            return nil
        default:
            return nil
        }
    }
}
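Resolve reports the outermost matching sentinel rather than the deepest cause; a short sketch of that ordering, assuming only the upstream github.com/containerd/errdefs module:

package main

import (
    "errors"
    "fmt"

    "github.com/containerd/errdefs"
)

func main() {
    // A deep "not found" failure that is reported as an invalid argument higher up.
    notFound := fmt.Errorf("layer sha256:abc: %w", errdefs.ErrNotFound)
    outer := fmt.Errorf("bad manifest: %w: %w", errdefs.ErrInvalidArgument, notFound)

    // errors.Is matches both sentinels, but Resolve returns the first one found
    // in the chain, which is what a server would map to a response code.
    fmt.Println(errors.Is(outer, errdefs.ErrNotFound))                // true
    fmt.Println(errdefs.Resolve(outer) == errdefs.ErrInvalidArgument) // true
}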
15 vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go generated vendored
@@ -26,12 +26,13 @@ import (
"archive/tar"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
@@ -45,10 +46,6 @@ import (
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
// TestingController is Compression with some helper methods necessary for testing.
|
||||
type TestingController interface {
|
||||
Compression
|
||||
@@ -920,9 +917,11 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
|
||||
}
|
||||
if sampleEntry == nil {
|
||||
t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target")
|
||||
return
|
||||
}
|
||||
if targetEntry == nil {
|
||||
t.Fatalf("rewrite target not found")
|
||||
return
|
||||
}
|
||||
targetEntry.Offset = sampleEntry.Offset
|
||||
},
|
||||
@@ -2291,7 +2290,11 @@ var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX
|
||||
func randomContents(n int) string {
|
||||
b := make([]rune, n)
|
||||
for i := range b {
|
||||
b[i] = runes[rand.Intn(len(runes))]
|
||||
bi, err := rand.Int(rand.Reader, big.NewInt(int64(len(runes))))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
b[i] = runes[int(bi.Int64())]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
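The last hunk above replaces the global math/rand source with a per-index draw from crypto/rand; a standalone sketch of that pattern (the rune set is shortened here for brevity):

package main

import (
    "crypto/rand"
    "fmt"
    "math/big"
)

var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyz")

// randomContents mirrors the change in the hunk: each index is drawn from
// crypto/rand instead of the deprecated global math/rand generator.
func randomContents(n int) string {
    b := make([]rune, n)
    for i := range b {
        bi, err := rand.Int(rand.Reader, big.NewInt(int64(len(runes))))
        if err != nil {
            panic(err)
        }
        b[i] = runes[int(bi.Int64())]
    }
    return string(b)
}

func main() {
    fmt.Println(randomContents(8))
}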
2 vendor/github.com/containerd/typeurl/v2/.gitignore generated vendored Normal file
@@ -0,0 +1,2 @@
*.test
coverage.txt
191 vendor/github.com/containerd/typeurl/v2/LICENSE generated vendored Normal file
@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

Copyright The containerd Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
26 vendor/github.com/containerd/typeurl/v2/README.md generated vendored Normal file
@@ -0,0 +1,26 @@
# typeurl

[](https://pkg.go.dev/github.com/containerd/typeurl)
[](https://github.com/containerd/typeurl/actions?query=workflow%3ACI)
[](https://codecov.io/gh/containerd/typeurl)
[](https://goreportcard.com/report/github.com/containerd/typeurl)

A Go package for managing the registration, marshaling, and unmarshaling of encoded types.

This package helps when types are sent over a ttrpc/GRPC API and marshaled as a protobuf [Any](https://pkg.go.dev/google.golang.org/protobuf@v1.27.1/types/known/anypb#Any)

## Project details

**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
As a containerd sub-project, you will find the:
* [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
* [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
* and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)

information in our [`containerd/project`](https://github.com/containerd/project) repository.

## Optional

By default, support for gogoproto is available alongside the standard Google
protobuf types.
You can choose to leave gogo support out by building with the `no_gogo` build tag.
83 vendor/github.com/containerd/typeurl/v2/doc.go generated vendored Normal file
@@ -0,0 +1,83 @@
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package typeurl

// Package typeurl assists with managing the registration, marshaling, and
// unmarshaling of types encoded as protobuf.Any.
//
// A protobuf.Any is a proto message that can contain any arbitrary data. It
// consists of two components, a TypeUrl and a Value, and its proto definition
// looks like this:
//
//    message Any {
//        string type_url = 1;
//        bytes value = 2;
//    }
//
// The TypeUrl is used to distinguish the contents from other proto.Any
// messages. This typeurl library manages these URLs to enable automagic
// marshaling and unmarshaling of the contents.
//
// For example, consider this go struct:
//
//    type Foo struct {
//        Field1 string
//        Field2 string
//    }
//
// To use typeurl, types must first be registered. This is typically done in
// the init function
//
//    func init() {
//        typeurl.Register(&Foo{}, "Foo")
//    }
//
// This will register the type Foo with the url path "Foo". The arguments to
// Register are variadic, and are used to construct a url path. Consider this
// example, from the github.com/containerd/containerd/client package:
//
//    func init() {
//        const prefix = "types.containerd.io"
//        // register TypeUrls for commonly marshaled external types
//        major := strconv.Itoa(specs.VersionMajor)
//        typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec")
//        // this function has more Register calls, which are elided.
//    }
//
// This registers several types under a more complex url, which ends up mapping
// to `types.containerd.io/opencontainers/runtime-spec/1/Spec` (or some other
// value for major).
//
// Once a type is registered, it can be marshaled to a proto.Any message simply
// by calling `MarshalAny`, like this:
//
//    foo := &Foo{Field1: "value1", Field2: "value2"}
//    anyFoo, err := typeurl.MarshalAny(foo)
//
// MarshalAny will resolve the correct URL for the type. If the type in
// question implements the proto.Message interface, then it will be marshaled
// as a proto message. Otherwise, it will be marshaled as json. This means that
// typeurl will work on any arbitrary data, whether or not it has a proto
// definition, as long as it can be serialized to json.
//
// To unmarshal, the process is simply inverse:
//
//    iface, err := typeurl.UnmarshalAny(anyFoo)
//    foo := iface.(*Foo)
//
// The correct type is automatically chosen from the type registry, and the
// returned interface can be cast straight to that type.
309 vendor/github.com/containerd/typeurl/v2/types.go generated vendored Normal file
@@ -0,0 +1,309 @@
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package typeurl

import (
    "encoding/json"
    "errors"
    "fmt"
    "path"
    "reflect"
    "sync"

    "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/reflect/protoregistry"
    "google.golang.org/protobuf/types/known/anypb"
)

var (
    mu       sync.RWMutex
    registry = make(map[reflect.Type]string)
    handlers []handler
)

type handler interface {
    Marshaller(interface{}) func() ([]byte, error)
    Unmarshaller(interface{}) func([]byte) error
    TypeURL(interface{}) string
    GetType(url string) (reflect.Type, bool)
}

// Definitions of common error types used throughout typeurl.
//
// These error types are used with errors.Wrap and errors.Wrapf to add context
// to an error.
//
// To detect an error class, use errors.Is() functions to tell whether an
// error is of this type.

var (
    ErrNotFound = errors.New("not found")
)

// Any contains an arbitrary protocol buffer message along with its type.
//
// While there is google.golang.org/protobuf/types/known/anypb.Any,
// we'd like to have our own to hide the underlying protocol buffer
// implementations from containerd clients.
//
// https://developers.google.com/protocol-buffers/docs/proto3#any
type Any interface {
    // GetTypeUrl returns a URL/resource name that uniquely identifies
    // the type of the serialized protocol buffer message.
    GetTypeUrl() string

    // GetValue returns a valid serialized protocol buffer of the type that
    // GetTypeUrl() indicates.
    GetValue() []byte
}

type anyType struct {
    typeURL string
    value   []byte
}

func (a *anyType) GetTypeUrl() string {
    if a == nil {
        return ""
    }
    return a.typeURL
}

func (a *anyType) GetValue() []byte {
    if a == nil {
        return nil
    }
    return a.value
}

// Register a type with a base URL for JSON marshaling. When the MarshalAny and
// UnmarshalAny functions are called they will treat the Any type value as JSON.
// To use protocol buffers for handling the Any value the proto.Register
// function should be used instead of this function.
func Register(v interface{}, args ...string) {
    var (
        t = tryDereference(v)
        p = path.Join(args...)
    )
    mu.Lock()
    defer mu.Unlock()
    if et, ok := registry[t]; ok {
        if et != p {
            panic(fmt.Errorf("type registered with alternate path %q != %q", et, p))
        }
        return
    }
    registry[t] = p
}

// TypeURL returns the type url for a registered type.
func TypeURL(v interface{}) (string, error) {
    mu.RLock()
    u, ok := registry[tryDereference(v)]
    mu.RUnlock()
    if !ok {
        switch t := v.(type) {
        case proto.Message:
            return string(t.ProtoReflect().Descriptor().FullName()), nil
        default:
            for _, h := range handlers {
                if u := h.TypeURL(v); u != "" {
                    return u, nil
                }
            }
            return "", fmt.Errorf("type %s: %w", reflect.TypeOf(v), ErrNotFound)
        }
    }
    return u, nil
}

// Is returns true if the type of the Any is the same as v.
func Is(any Any, v interface{}) bool {
    if any == nil {
        return false
    }
    // call to check that v is a pointer
    tryDereference(v)
    url, err := TypeURL(v)
    if err != nil {
        return false
    }
    return any.GetTypeUrl() == url
}

// MarshalAny marshals the value v into an any with the correct TypeUrl.
// If the provided object is already a proto.Any message, then it will be
// returned verbatim. If it is of type proto.Message, it will be marshaled as a
// protocol buffer. Otherwise, the object will be marshaled to json.
func MarshalAny(v interface{}) (Any, error) {
    var marshal func(v interface{}) ([]byte, error)
    switch t := v.(type) {
    case Any:
        // avoid reserializing the type if we have an any.
        return t, nil
    case proto.Message:
        marshal = func(v interface{}) ([]byte, error) {
            return proto.Marshal(t)
        }
    default:
        for _, h := range handlers {
            if m := h.Marshaller(v); m != nil {
                marshal = func(v interface{}) ([]byte, error) {
                    return m()
                }
                break
            }
        }

        if marshal == nil {
            marshal = json.Marshal
        }
    }

    url, err := TypeURL(v)
    if err != nil {
        return nil, err
    }

    data, err := marshal(v)
    if err != nil {
        return nil, err
    }
    return &anyType{
        typeURL: url,
        value:   data,
    }, nil
}

// UnmarshalAny unmarshals the any type into a concrete type.
func UnmarshalAny(any Any) (interface{}, error) {
    return UnmarshalByTypeURL(any.GetTypeUrl(), any.GetValue())
}

// UnmarshalByTypeURL unmarshals the given type and value into a concrete type.
func UnmarshalByTypeURL(typeURL string, value []byte) (interface{}, error) {
    return unmarshal(typeURL, value, nil)
}

// UnmarshalTo unmarshals the any type into a concrete type passed in the out
// argument. It is identical to UnmarshalAny, but lets clients provide a
// destination type through the out argument.
func UnmarshalTo(any Any, out interface{}) error {
    return UnmarshalToByTypeURL(any.GetTypeUrl(), any.GetValue(), out)
}

// UnmarshalToByTypeURL unmarshals the given type and value into a concrete type passed
// in the out argument. It is identical to UnmarshalByTypeURL, but lets clients
// provide a destination type through the out argument.
func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error {
    _, err := unmarshal(typeURL, value, out)
    return err
}

// MarshalProto converts typeurl.Any to google.golang.org/protobuf/types/known/anypb.Any.
func MarshalProto(from Any) *anypb.Any {
    if from == nil {
        return nil
    }

    if pbany, ok := from.(*anypb.Any); ok {
        return pbany
    }

    return &anypb.Any{
        TypeUrl: from.GetTypeUrl(),
        Value:   from.GetValue(),
    }
}

// MarshalAnyToProto converts an arbitrary interface to google.golang.org/protobuf/types/known/anypb.Any.
func MarshalAnyToProto(from interface{}) (*anypb.Any, error) {
    anyType, err := MarshalAny(from)
    if err != nil {
        return nil, err
    }
    return MarshalProto(anyType), nil
}

func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) {
    t, isProto, err := getTypeByUrl(typeURL)
    if err != nil {
        return nil, err
    }

    if v == nil {
        v = reflect.New(t).Interface()
    } else {
        // Validate interface type provided by client
        vURL, err := TypeURL(v)
        if err != nil {
            return nil, err
        }
        if typeURL != vURL {
            return nil, fmt.Errorf("can't unmarshal type %q to output %q", typeURL, vURL)
        }
    }

    if isProto {
        pm, ok := v.(proto.Message)
        if ok {
            return v, proto.Unmarshal(value, pm)
        }

        for _, h := range handlers {
            if unmarshal := h.Unmarshaller(v); unmarshal != nil {
                return v, unmarshal(value)
            }
        }
    }

    // fallback to json unmarshaller
    return v, json.Unmarshal(value, v)
}

func getTypeByUrl(url string) (_ reflect.Type, isProto bool, _ error) {
    mu.RLock()
    for t, u := range registry {
        if u == url {
            mu.RUnlock()
            return t, false, nil
        }
    }
    mu.RUnlock()
    mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
    if err != nil {
        if errors.Is(err, protoregistry.NotFound) {
            for _, h := range handlers {
                if t, isProto := h.GetType(url); t != nil {
                    return t, isProto, nil
                }
            }
        }
        return nil, false, fmt.Errorf("type with url %s: %w", url, ErrNotFound)
    }
    empty := mt.New().Interface()
    return reflect.TypeOf(empty).Elem(), true, nil
}

func tryDereference(v interface{}) reflect.Type {
    t := reflect.TypeOf(v)
    if t.Kind() == reflect.Ptr {
        // require check of pointer but dereference to register
        return t.Elem()
    }
    panic("v is not a pointer to a type")
}
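A short sketch tying Register, MarshalAny, and UnmarshalAny together for a plain Go struct, which takes the JSON fallback path shown above. The Foo type and the types.example.dev prefix are made up for illustration.

package main

import (
    "fmt"

    "github.com/containerd/typeurl/v2"
)

// Foo has no protobuf definition, so typeurl encodes it as JSON.
type Foo struct {
    Field1 string
    Field2 string
}

func init() {
    // Registration must happen before marshaling; the variadic args are joined
    // into the type URL path.
    typeurl.Register(&Foo{}, "types.example.dev", "Foo")
}

func main() {
    anyFoo, err := typeurl.MarshalAny(&Foo{Field1: "a", Field2: "b"})
    if err != nil {
        panic(err)
    }
    fmt.Println(anyFoo.GetTypeUrl()) // types.example.dev/Foo

    v, err := typeurl.UnmarshalAny(anyFoo)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", v.(*Foo)) // &{Field1:a Field2:b}
}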
68 vendor/github.com/containerd/typeurl/v2/types_gogo.go generated vendored Normal file
@@ -0,0 +1,68 @@
//go:build !no_gogo

/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package typeurl

import (
    "reflect"

    gogoproto "github.com/gogo/protobuf/proto"
)

func init() {
    handlers = append(handlers, gogoHandler{})
}

type gogoHandler struct{}

func (gogoHandler) Marshaller(v interface{}) func() ([]byte, error) {
    pm, ok := v.(gogoproto.Message)
    if !ok {
        return nil
    }
    return func() ([]byte, error) {
        return gogoproto.Marshal(pm)
    }
}

func (gogoHandler) Unmarshaller(v interface{}) func([]byte) error {
    pm, ok := v.(gogoproto.Message)
    if !ok {
        return nil
    }

    return func(dt []byte) error {
        return gogoproto.Unmarshal(dt, pm)
    }
}

func (gogoHandler) TypeURL(v interface{}) string {
    pm, ok := v.(gogoproto.Message)
    if !ok {
        return ""
    }
    return gogoproto.MessageName(pm)
}

func (gogoHandler) GetType(url string) (reflect.Type, bool) {
    t := gogoproto.MessageType(url)
    if t == nil {
        return nil, false
    }
    return t.Elem(), true
}
117 vendor/github.com/containers/common/pkg/capabilities/capabilities.go generated vendored
@@ -9,20 +9,13 @@ import (
"errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/syndtr/gocapability/capability"
|
||||
"github.com/moby/sys/capability"
|
||||
)
|
||||
|
||||
var (
|
||||
// Used internally and populated during init().
|
||||
capabilityList []string
|
||||
|
||||
// Used internally and populated during init().
|
||||
capsList []capability.Cap
|
||||
|
||||
// ErrUnknownCapability is thrown when an unknown capability is processed.
|
||||
ErrUnknownCapability = errors.New("unknown capability")
|
||||
|
||||
@@ -35,67 +28,67 @@ var (
|
||||
// Useful on the CLI for `--cap-add=all` etc.
const All = "ALL"

func getCapName(c capability.Cap) string {
func capName(c capability.Cap) string {
return "CAP_" + strings.ToUpper(c.String())
}

func init() {
last := capability.CAP_LAST_CAP
// hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap
if last == capability.Cap(63) {
last = capability.CAP_BLOCK_SUSPEND
// capStrList returns all capabilities supported by the currently running kernel,
// or an error if the list can not be obtained.
var capStrList = sync.OnceValues(func() ([]string, error) {
list, err := capability.ListSupported()
if err != nil {
return nil, err
}
for _, cap := range capability.List() {
if cap > last {
caps := make([]string, len(list))
for i, c := range list {
caps[i] = capName(c)
}
slices.Sort(caps)
return caps, nil
})

// BoundingSet returns the capabilities in the current bounding set.
func BoundingSet() ([]string, error) {
return boundingSet()
}

var boundingSet = sync.OnceValues(func() ([]string, error) {
currentCaps, err := capability.NewPid2(0)
if err != nil {
return nil, err
}
err = currentCaps.Load()
if err != nil {
return nil, err
}
list, err := capability.ListSupported()
if err != nil {
return nil, err
}
var r []string
for _, c := range list {
if !currentCaps.Get(capability.BOUNDING, c) {
continue
}
capsList = append(capsList, cap)
capabilityList = append(capabilityList, getCapName(cap))
sort.Strings(capabilityList)
r = append(r, capName(c))
}
}
slices.Sort(r)
return r, nil
})

var (
boundingSetOnce sync.Once
boundingSetRet []string
boundingSetErr error
)

// BoundingSet returns the capabilities in the current bounding set
func BoundingSet() ([]string, error) {
boundingSetOnce.Do(func() {
currentCaps, err := capability.NewPid2(0)
if err != nil {
boundingSetErr = err
return
}
err = currentCaps.Load()
if err != nil {
boundingSetErr = err
return
}
var r []string
for _, c := range capsList {
if !currentCaps.Get(capability.BOUNDING, c) {
continue
}
r = append(r, getCapName(c))
}
boundingSetRet = r
sort.Strings(boundingSetRet)
boundingSetErr = err
})
return boundingSetRet, boundingSetErr
}

// AllCapabilities returns all known capabilities.
// AllCapabilities returns all capabilities supported by the running kernel.
func AllCapabilities() []string {
return capabilityList
list, _ := capStrList()
return list
}

// NormalizeCapabilities normalizes caps by adding a "CAP_" prefix (if not yet
// present).
func NormalizeCapabilities(caps []string) ([]string, error) {
all, err := capStrList()
if err != nil {
return nil, err
}
normalized := make([]string, 0, len(caps))
for _, c := range caps {
c = strings.ToUpper(c)
@@ -106,19 +99,23 @@ func NormalizeCapabilities(caps []string) ([]string, error) {
if !strings.HasPrefix(c, "CAP_") {
c = "CAP_" + c
}
if !slices.Contains(capabilityList, c) {
if !slices.Contains(all, c) {
return nil, fmt.Errorf("%q: %w", c, ErrUnknownCapability)
}
normalized = append(normalized, c)
}
sort.Strings(normalized)
slices.Sort(normalized)
return normalized, nil
}

// ValidateCapabilities validates if caps only contains valid capabilities.
func ValidateCapabilities(caps []string) error {
all, err := capStrList()
if err != nil {
return err
}
for _, c := range caps {
if !slices.Contains(capabilityList, c) {
if !slices.Contains(all, c) {
return fmt.Errorf("%q: %w", c, ErrUnknownCapability)
}
}
@@ -155,7 +152,7 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) {
return nil, errors.New("adding all caps and removing all caps not allowed")
}
// "Drop" all capabilities; return what's in capAdd instead
sort.Strings(capAdd)
slices.Sort(capAdd)
return capAdd, nil
}

@@ -195,6 +192,6 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) {
}
caps = append(caps, cap)
}
sort.Strings(caps)
slices.Sort(caps)
return caps, nil
}
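The replacement of the old init()-time globals above with capStrList and boundingSet relies on sync.OnceValues (Go 1.21+), which runs a function at most once and caches both its result and its error. A minimal standalone sketch of that pattern, with illustrative names only (not code from this repository):

```go
package main

import (
	"fmt"
	"sync"
)

// supportedCaps stands in for an expensive query such as asking the kernel
// which capabilities it supports: the function body runs at most once, and
// every later call returns the cached ([]string, error) pair.
var supportedCaps = sync.OnceValues(func() ([]string, error) {
	fmt.Println("computing...") // printed only on the first call
	return []string{"CAP_CHOWN", "CAP_NET_ADMIN"}, nil
})

func main() {
	for i := 0; i < 2; i++ {
		list, err := supportedCaps()
		fmt.Println(list, err)
	}
}
```

Compared with the removed init()/sync.Once variables, the result, its error, and the synchronization live in one declaration, and nothing is computed unless a caller actually needs it.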
2 vendor/github.com/containers/common/pkg/report/camelcase/camelcase.go generated vendored
@@ -73,7 +73,7 @@ func Split(src string) (entries []string) {
}
// handle upper case -> lower case sequences, e.g.
// "PDFL", "oader" -> "PDF", "Loader"
for i := 0; i < len(runes)-1; i++ {
for i := range len(runes) - 1 {
if unicode.IsUpper(runes[i][0]) && unicode.IsLower(runes[i+1][0]) {
runes[i+1] = append([]rune{runes[i][len(runes[i])-1]}, runes[i+1]...)
runes[i] = runes[i][:len(runes[i])-1]
2 vendor/github.com/containers/common/pkg/report/template.go generated vendored
@@ -101,7 +101,7 @@ func Headers(object any, overrides map[string]string) []map[string]string {

// Column header will be field name upper-cased.
headers := make(map[string]string, value.NumField())
for i := 0; i < value.Type().NumField(); i++ {
for i := range value.Type().NumField() {
field := value.Type().Field(i)
// Recurse to find field names from promoted structs
if field.Type.Kind() == reflect.Struct && field.Anonymous {
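The loop rewrites in the two hunks above use Go 1.22's range-over-integer form. A small sketch of the equivalence, with illustrative values not taken from the vendored files:

```go
package main

import "fmt"

func main() {
	// With Go 1.22 or newer, "for i := range n" iterates i = 0, 1, ..., n-1,
	// exactly like the classic three-clause loop it replaces.
	n := len("PDFLoader") - 1
	for i := 0; i < n; i++ {
		fmt.Print(i, " ")
	}
	fmt.Println()
	for i := range n {
		fmt.Print(i, " ")
	}
	fmt.Println()
}
```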
161 vendor/github.com/containers/image/v5/copy/compression.go generated vendored
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/containers/image/v5/pkg/compression"
|
||||
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
|
||||
"github.com/containers/image/v5/types"
|
||||
chunkedToc "github.com/containers/storage/pkg/chunked/toc"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@@ -34,10 +35,10 @@ var (
|
||||
|
||||
// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step.
|
||||
type bpDetectCompressionStepData struct {
|
||||
isCompressed bool
|
||||
format compressiontypes.Algorithm // Valid if isCompressed
|
||||
decompressor compressiontypes.DecompressorFunc // Valid if isCompressed
|
||||
srcCompressorName string // Compressor name to possibly record in the blob info cache for the source blob.
|
||||
isCompressed bool
|
||||
format compressiontypes.Algorithm // Valid if isCompressed
|
||||
decompressor compressiontypes.DecompressorFunc // Valid if isCompressed
|
||||
srcCompressorBaseVariantName string // Compressor name to possibly record in the blob info cache for the source blob.
|
||||
}
|
||||
|
||||
// blobPipelineDetectCompressionStep updates *stream to detect its current compression format.
|
||||
@@ -51,15 +52,25 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
|
||||
}
|
||||
stream.reader = reader
|
||||
|
||||
if decompressor != nil && format.Name() == compressiontypes.ZstdAlgorithmName {
|
||||
tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
|
||||
if err != nil {
|
||||
return bpDetectCompressionStepData{}, err
|
||||
}
|
||||
if tocDigest != nil {
|
||||
format = compression.ZstdChunked
|
||||
}
|
||||
|
||||
}
|
||||
res := bpDetectCompressionStepData{
|
||||
isCompressed: decompressor != nil,
|
||||
format: format,
|
||||
decompressor: decompressor,
|
||||
}
|
||||
if res.isCompressed {
|
||||
res.srcCompressorName = format.Name()
|
||||
res.srcCompressorBaseVariantName = format.BaseVariantName()
|
||||
} else {
|
||||
res.srcCompressorName = internalblobinfocache.Uncompressed
|
||||
res.srcCompressorBaseVariantName = internalblobinfocache.Uncompressed
|
||||
}
|
||||
|
||||
if expectedBaseFormat, known := expectedBaseCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.BaseVariantName() != expectedBaseFormat.Name() {
|
||||
@@ -70,13 +81,14 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
|
||||
|
||||
// bpCompressionStepData contains data that the copy pipeline needs about the compression step.
|
||||
type bpCompressionStepData struct {
|
||||
operation bpcOperation // What we are actually doing
|
||||
uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
|
||||
uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
|
||||
uploadedAnnotations map[string]string // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
|
||||
srcCompressorName string // Compressor name to record in the blob info cache for the source blob.
|
||||
uploadedCompressorName string // Compressor name to record in the blob info cache for the uploaded blob.
|
||||
closers []io.Closer // Objects to close after the upload is done, if any.
|
||||
operation bpcOperation // What we are actually doing
|
||||
uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
|
||||
uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
|
||||
uploadedAnnotations map[string]string // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
|
||||
srcCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the source blob.
|
||||
uploadedCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the uploaded blob.
|
||||
uploadedCompressorSpecificVariantName string // Compressor specific variant name to record in the blob info cache for the uploaded blob.
|
||||
closers []io.Closer // Objects to close after the upload is done, if any.
|
||||
}
|
||||
|
||||
type bpcOperation int
|
||||
@@ -128,11 +140,12 @@ func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectComp
|
||||
// We can’t do anything with an encrypted blob unless decrypted.
|
||||
logrus.Debugf("Using original blob without modification for encrypted blob")
|
||||
return &bpCompressionStepData{
|
||||
operation: bpcOpPreserveOpaque,
|
||||
uploadedOperation: types.PreserveOriginal,
|
||||
uploadedAlgorithm: nil,
|
||||
srcCompressorName: internalblobinfocache.UnknownCompression,
|
||||
uploadedCompressorName: internalblobinfocache.UnknownCompression,
|
||||
operation: bpcOpPreserveOpaque,
|
||||
uploadedOperation: types.PreserveOriginal,
|
||||
uploadedAlgorithm: nil,
|
||||
srcCompressorBaseVariantName: internalblobinfocache.UnknownCompression,
|
||||
uploadedCompressorBaseVariantName: internalblobinfocache.UnknownCompression,
|
||||
uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
|
||||
}, nil
|
||||
}
|
||||
return nil, nil
|
||||
@@ -156,14 +169,19 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp
|
||||
Digest: "",
|
||||
Size: -1,
|
||||
}
|
||||
specificVariantName := uploadedAlgorithm.Name()
|
||||
if specificVariantName == uploadedAlgorithm.BaseVariantName() {
|
||||
specificVariantName = internalblobinfocache.UnknownCompression
|
||||
}
|
||||
return &bpCompressionStepData{
|
||||
operation: bpcOpCompressUncompressed,
|
||||
uploadedOperation: types.Compress,
|
||||
uploadedAlgorithm: uploadedAlgorithm,
|
||||
uploadedAnnotations: annotations,
|
||||
srcCompressorName: detected.srcCompressorName,
|
||||
uploadedCompressorName: uploadedAlgorithm.Name(),
|
||||
closers: []io.Closer{reader},
|
||||
operation: bpcOpCompressUncompressed,
|
||||
uploadedOperation: types.Compress,
|
||||
uploadedAlgorithm: uploadedAlgorithm,
|
||||
uploadedAnnotations: annotations,
|
||||
srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
|
||||
uploadedCompressorBaseVariantName: uploadedAlgorithm.BaseVariantName(),
|
||||
uploadedCompressorSpecificVariantName: specificVariantName,
|
||||
closers: []io.Closer{reader},
|
||||
}, nil
|
||||
}
|
||||
return nil, nil
|
||||
@@ -196,15 +214,20 @@ func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bp
|
||||
Digest: "",
|
||||
Size: -1,
|
||||
}
|
||||
specificVariantName := ic.compressionFormat.Name()
|
||||
if specificVariantName == ic.compressionFormat.BaseVariantName() {
|
||||
specificVariantName = internalblobinfocache.UnknownCompression
|
||||
}
|
||||
succeeded = true
|
||||
return &bpCompressionStepData{
|
||||
operation: bpcOpRecompressCompressed,
|
||||
uploadedOperation: types.PreserveOriginal,
|
||||
uploadedAlgorithm: ic.compressionFormat,
|
||||
uploadedAnnotations: annotations,
|
||||
srcCompressorName: detected.srcCompressorName,
|
||||
uploadedCompressorName: ic.compressionFormat.Name(),
|
||||
closers: []io.Closer{decompressed, recompressed},
|
||||
operation: bpcOpRecompressCompressed,
|
||||
uploadedOperation: types.PreserveOriginal,
|
||||
uploadedAlgorithm: ic.compressionFormat,
|
||||
uploadedAnnotations: annotations,
|
||||
srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
|
||||
uploadedCompressorBaseVariantName: ic.compressionFormat.BaseVariantName(),
|
||||
uploadedCompressorSpecificVariantName: specificVariantName,
|
||||
closers: []io.Closer{decompressed, recompressed},
|
||||
}, nil
|
||||
}
|
||||
return nil, nil
|
||||
@@ -225,12 +248,13 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp
|
||||
Size: -1,
|
||||
}
|
||||
return &bpCompressionStepData{
|
||||
operation: bpcOpDecompressCompressed,
|
||||
uploadedOperation: types.Decompress,
|
||||
uploadedAlgorithm: nil,
|
||||
srcCompressorName: detected.srcCompressorName,
|
||||
uploadedCompressorName: internalblobinfocache.Uncompressed,
|
||||
closers: []io.Closer{s},
|
||||
operation: bpcOpDecompressCompressed,
|
||||
uploadedOperation: types.Decompress,
|
||||
uploadedAlgorithm: nil,
|
||||
srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
|
||||
uploadedCompressorBaseVariantName: internalblobinfocache.Uncompressed,
|
||||
uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
|
||||
closers: []io.Closer{s},
|
||||
}, nil
|
||||
}
|
||||
return nil, nil
|
||||
@@ -268,11 +292,15 @@ func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCom
|
||||
algorithm = nil
|
||||
}
|
||||
return &bpCompressionStepData{
|
||||
operation: bpcOp,
|
||||
uploadedOperation: uploadedOp,
|
||||
uploadedAlgorithm: algorithm,
|
||||
srcCompressorName: detected.srcCompressorName,
|
||||
uploadedCompressorName: detected.srcCompressorName,
|
||||
operation: bpcOp,
|
||||
uploadedOperation: uploadedOp,
|
||||
uploadedAlgorithm: algorithm,
|
||||
srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
|
||||
// We only record the base variant of the format on upload; we didn’t do anything with
|
||||
// the TOC, we don’t know whether it matches the blob digest, so we don’t want to trigger
|
||||
// reuse of any kind between the blob digest and the TOC digest.
|
||||
uploadedCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
|
||||
uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -308,6 +336,15 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
|
||||
// No useful information
|
||||
case bpcOpCompressUncompressed:
|
||||
c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
|
||||
if d.uploadedAnnotations != nil {
|
||||
tocDigest, err := chunkedToc.GetTOCDigest(d.uploadedAnnotations)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing just-created compression annotations: %w", err)
|
||||
}
|
||||
if tocDigest != nil {
|
||||
c.blobInfoCache.RecordTOCUncompressedPair(*tocDigest, srcInfo.Digest)
|
||||
}
|
||||
}
|
||||
case bpcOpDecompressCompressed:
|
||||
c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
|
||||
case bpcOpRecompressCompressed, bpcOpPreserveCompressed:
|
||||
@@ -323,29 +360,27 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
|
||||
return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
|
||||
}
|
||||
}
|
||||
if d.srcCompressorName == "" || d.uploadedCompressorName == "" {
|
||||
return fmt.Errorf("internal error: missing compressor names (src: %q, uploaded: %q)",
|
||||
d.srcCompressorName, d.uploadedCompressorName)
|
||||
if d.srcCompressorBaseVariantName == "" || d.uploadedCompressorBaseVariantName == "" || d.uploadedCompressorSpecificVariantName == "" {
|
||||
return fmt.Errorf("internal error: missing compressor names (src base: %q, uploaded base: %q, uploaded specific: %q)",
|
||||
d.srcCompressorBaseVariantName, d.uploadedCompressorBaseVariantName, d.uploadedCompressorSpecificVariantName)
|
||||
}
|
||||
if d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
|
||||
if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
|
||||
// HACK: Don’t record zstd:chunked algorithms.
|
||||
// There is already a similar hack in internal/imagedestination/impl/helpers.CandidateMatchesTryReusingBlobOptions,
|
||||
// and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless.
|
||||
//
|
||||
// We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate
|
||||
// between zstd and zstd:chunked; so we could, in varying situations over time, call RecordDigestCompressorName
|
||||
// with the same digest and both ZstdAlgorithmName and ZstdChunkedAlgorithmName , which causes warnings about
|
||||
// inconsistent data to be logged.
|
||||
c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
|
||||
}
|
||||
if d.uploadedCompressorBaseVariantName != internalblobinfocache.UnknownCompression {
|
||||
c.blobInfoCache.RecordDigestCompressorData(uploadedInfo.Digest, internalblobinfocache.DigestCompressorData{
|
||||
BaseVariantCompressor: d.uploadedCompressorBaseVariantName,
|
||||
SpecificVariantCompressor: d.uploadedCompressorSpecificVariantName,
|
||||
SpecificVariantAnnotations: d.uploadedAnnotations,
|
||||
})
|
||||
}
|
||||
if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest &&
|
||||
d.srcCompressorName != internalblobinfocache.UnknownCompression {
|
||||
if d.srcCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
|
||||
// HACK: Don’t record zstd:chunked algorithms, see above.
|
||||
c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
|
||||
}
|
||||
d.srcCompressorBaseVariantName != internalblobinfocache.UnknownCompression {
|
||||
// If the source is already using some TOC-dependent variant, we either copied the
|
||||
// blob as is, or perhaps decompressed it; either way we don’t trust the TOC digest,
|
||||
// so record neither the variant name, nor the TOC digest.
|
||||
c.blobInfoCache.RecordDigestCompressorData(srcInfo.Digest, internalblobinfocache.DigestCompressorData{
|
||||
BaseVariantCompressor: d.srcCompressorBaseVariantName,
|
||||
SpecificVariantCompressor: internalblobinfocache.UnknownCompression,
|
||||
SpecificVariantAnnotations: nil,
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
53 vendor/github.com/containers/image/v5/copy/copy.go generated vendored
@@ -137,6 +137,17 @@ type Options struct {
|
||||
// DestinationCtx.CompressionFormat is used exclusively, and blobs of other
|
||||
// compression algorithms are not reused.
|
||||
ForceCompressionFormat bool
|
||||
|
||||
// ReportResolvedReference, if set, asks the destination transport to store
|
||||
// a “resolved” (more detailed) reference to the created image
|
||||
// into the value this option points to.
|
||||
// What “resolved” means is transport-specific.
|
||||
// Most transports don’t support this, and cause the value to be set to nil.
|
||||
//
|
||||
// For the containers-storage: transport, the reference contains an image ID,
|
||||
// so that storage.ResolveReference returns exactly the created image.
|
||||
// WARNING: It is unspecified whether the reference also contains a reference.Named element.
|
||||
ReportResolvedReference *types.ImageReference
|
||||
}
|
||||
|
||||
// OptionCompressionVariant allows to supply information about
|
||||
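For context, a caller could consume the new ReportResolvedReference option roughly as follows. This is a hedged usage sketch: the helper name and the way the surrounding references are obtained are assumptions; only the Options field and the copy.Image call come from the code above.

```go
package example

import (
	"context"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/types"
)

// copyWithResolvedRef copies srcRef to destRef and, where the destination
// transport supports it, reports the transport-resolved reference of the
// created image. Most transports leave it nil; for containers-storage: the
// reference contains an image ID usable with storage.ResolveReference.
func copyWithResolvedRef(ctx context.Context, pc *signature.PolicyContext,
	destRef, srcRef types.ImageReference) (types.ImageReference, error) {
	var resolved types.ImageReference
	_, err := copy.Image(ctx, pc, destRef, srcRef, &copy.Options{
		ReportResolvedReference: &resolved,
	})
	return resolved, err
}
```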
@@ -193,35 +204,33 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
|
||||
reportWriter = options.ReportWriter
|
||||
}
|
||||
|
||||
// safeClose amends retErr with an error from c.Close(), if any.
|
||||
safeClose := func(name string, c io.Closer) {
|
||||
err := c.Close()
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
// Do not use %w for err as we don't want it to be unwrapped by callers.
|
||||
if retErr != nil {
|
||||
retErr = fmt.Errorf(" (%s: %s): %w", name, err.Error(), retErr)
|
||||
} else {
|
||||
retErr = fmt.Errorf(" (%s: %s)", name, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("initializing destination %s: %w", transports.ImageName(destRef), err)
|
||||
}
|
||||
dest := imagedestination.FromPublic(publicDest)
|
||||
defer func() {
|
||||
if err := dest.Close(); err != nil {
|
||||
if retErr != nil {
|
||||
retErr = fmt.Errorf(" (dest: %v): %w", err, retErr)
|
||||
} else {
|
||||
retErr = fmt.Errorf(" (dest: %v)", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
defer safeClose("dest", dest)
|
||||
|
||||
publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("initializing source %s: %w", transports.ImageName(srcRef), err)
|
||||
}
|
||||
rawSource := imagesource.FromPublic(publicRawSource)
|
||||
defer func() {
|
||||
if err := rawSource.Close(); err != nil {
|
||||
if retErr != nil {
|
||||
retErr = fmt.Errorf(" (src: %v): %w", err, retErr)
|
||||
} else {
|
||||
retErr = fmt.Errorf(" (src: %v)", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
defer safeClose("src", rawSource)
|
||||
|
||||
// If reportWriter is not a TTY (e.g., when piping to a file), do not
|
||||
// print the progress bars to avoid long and hard to parse output.
|
||||
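The safeClose helper introduced above folds a failed Close() into the function's named return error instead of silently dropping it. A minimal self-contained sketch of the same pattern, with illustrative names rather than code from containers/image:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// writeGreeting amends the named return value retErr from a deferred close,
// mirroring the deferred safeClose("dest", ...) / safeClose("src", ...) calls above.
func writeGreeting(path string) (retErr error) {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	safeClose := func(name string, c io.Closer) {
		if err := c.Close(); err != nil {
			// Annotate retErr without wrapping the Close error itself.
			if retErr != nil {
				retErr = fmt.Errorf(" (%s: %s): %w", name, err.Error(), retErr)
			} else {
				retErr = fmt.Errorf(" (%s: %s)", name, err.Error())
			}
		}
	}
	defer safeClose("output", f)

	_, retErr = f.WriteString("hello\n")
	return retErr
}

func main() {
	fmt.Println(writeGreeting(os.TempDir() + "/greeting.txt"))
}
```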
@@ -339,7 +348,13 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
|
||||
}
|
||||
}
|
||||
|
||||
if err := c.dest.Commit(ctx, c.unparsedToplevel); err != nil {
|
||||
if options.ReportResolvedReference != nil {
|
||||
*options.ReportResolvedReference = nil // The default outcome, if not specifically supported by the transport.
|
||||
}
|
||||
if err := c.dest.CommitWithOptions(ctx, private.CommitOptions{
|
||||
UnparsedToplevel: c.unparsedToplevel,
|
||||
ReportResolvedReference: options.ReportResolvedReference,
|
||||
}); err != nil {
|
||||
return nil, fmt.Errorf("committing the finished image: %w", err)
|
||||
}
|
||||
|
||||
|
||||
2 vendor/github.com/containers/image/v5/copy/encryption.go generated vendored
@@ -48,7 +48,7 @@ func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo
|
||||
Annotations: stream.info.Annotations,
|
||||
}
|
||||
// DecryptLayer supposedly returns a digest of the decrypted stream.
|
||||
// In pratice, that value is never set in the current implementation.
|
||||
// In practice, that value is never set in the current implementation.
|
||||
// And we shouldn’t use it anyway, because it is not trusted: encryption can be made to a public key,
|
||||
// i.e. it doesn’t authenticate the origin of the metadata in any way.
|
||||
reader, _, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)
|
||||
|
||||
15 vendor/github.com/containers/image/v5/copy/progress_bars.go generated vendored
@@ -24,13 +24,18 @@ func (c *copier) newProgressPool() *mpb.Progress {
|
||||
|
||||
// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar
|
||||
func customPartialBlobDecorFunc(s decor.Statistics) string {
|
||||
current := decor.SizeB1024(s.Current)
|
||||
total := decor.SizeB1024(s.Total)
|
||||
refill := decor.SizeB1024(s.Refill)
|
||||
if s.Total == 0 {
|
||||
pairFmt := "%.1f / %.1f (skipped: %.1f)"
|
||||
return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill))
|
||||
return fmt.Sprintf("%.1f / %.1f (skipped: %.1f)", current, total, refill)
|
||||
}
|
||||
// If we didn't do a partial fetch then let's not output a distracting ("skipped: 0.0b = 0.00%")
|
||||
if s.Refill == 0 {
|
||||
return fmt.Sprintf("%.1f / %.1f", current, total)
|
||||
}
|
||||
pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)"
|
||||
percentage := 100.0 * float64(s.Refill) / float64(s.Total)
|
||||
return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage)
|
||||
return fmt.Sprintf("%.1f / %.1f (skipped: %.1f = %.2f%%)", current, total, refill, percentage)
|
||||
}
|
||||
|
||||
// progressBar wraps a *mpb.Bar, allowing us to add extra state and methods.
|
||||
@@ -121,7 +126,7 @@ func (c *copier) printCopyInfo(kind string, info types.BlobInfo) {
|
||||
}
|
||||
}
|
||||
|
||||
// mark100PercentComplete marks the progres bars as 100% complete;
|
||||
// mark100PercentComplete marks the progress bars as 100% complete;
|
||||
// it may do so by possibly advancing the current state if it is below the known total.
|
||||
func (bar *progressBar) mark100PercentComplete() {
|
||||
if bar.originalSize > 0 {
|
||||
|
||||
2 vendor/github.com/containers/image/v5/copy/sign.go generated vendored
@@ -106,7 +106,7 @@ func (c *copier) createSignatures(ctx context.Context, manifest []byte, identity
|
||||
if len(c.signers) == 1 {
|
||||
return nil, fmt.Errorf("creating signature: %w", err)
|
||||
} else {
|
||||
return nil, fmt.Errorf("creating signature %d: %w", signerIndex, err)
|
||||
return nil, fmt.Errorf("creating signature %d: %w", signerIndex+1, err)
|
||||
}
|
||||
}
|
||||
res = append(res, newSig)
|
||||
|
||||
93 vendor/github.com/containers/image/v5/copy/single.go generated vendored
@@ -6,6 +6,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"maps"
|
||||
"reflect"
|
||||
"slices"
|
||||
"strings"
|
||||
@@ -108,7 +109,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
|
||||
}
|
||||
}
|
||||
|
||||
if err := checkImageDestinationForCurrentRuntime(ctx, c.options.DestinationCtx, src, c.dest); err != nil {
|
||||
if err := prepareImageConfigForDest(ctx, c.options.DestinationCtx, src, c.dest); err != nil {
|
||||
return copySingleImageResult{}, err
|
||||
}
|
||||
|
||||
@@ -149,6 +150,28 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
|
||||
ic.compressionFormat = c.options.DestinationCtx.CompressionFormat
|
||||
ic.compressionLevel = c.options.DestinationCtx.CompressionLevel
|
||||
}
|
||||
// HACK: Don’t combine zstd:chunked and encryption.
|
||||
// zstd:chunked can only usefully be consumed using range requests of parts of the layer, which would require the encryption
|
||||
// to support decrypting arbitrary subsets of the stream. That’s plausible but not supported using the encryption API we have.
|
||||
// Also, the chunked metadata is exposed in annotations unencrypted, which reveals the TOC digest = layer identity without
|
||||
// encryption. (That can be determined from the unencrypted config anyway, but, still...)
|
||||
//
|
||||
// Ideally this should query a well-defined property of the compression algorithm (and $somehow determine the right fallback) instead of
|
||||
// hard-coding zstd:chunked / zstd.
|
||||
if ic.c.options.OciEncryptLayers != nil {
|
||||
format := ic.compressionFormat
|
||||
if format == nil {
|
||||
format = defaultCompressionFormat
|
||||
}
|
||||
if format.Name() == compressiontypes.ZstdChunkedAlgorithmName {
|
||||
if ic.requireCompressionFormatMatch {
|
||||
return copySingleImageResult{}, errors.New("explicitly requested to combine zstd:chunked with encryption, which is not beneficial; use plain zstd instead")
|
||||
}
|
||||
logrus.Warnf("Compression using zstd:chunked is not beneficial for encrypted layers, using plain zstd instead")
|
||||
ic.compressionFormat = &compression.Zstd
|
||||
}
|
||||
}
|
||||
|
||||
// Decide whether we can substitute blobs with semantic equivalents:
|
||||
// - Don’t do that if we can’t modify the manifest at all
|
||||
// - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
|
||||
@@ -192,7 +215,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
|
||||
shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
|
||||
noPendingManifestUpdates := ic.noPendingManifestUpdates()
|
||||
|
||||
logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t, compression match required for resuing blobs=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates, opts.requireCompressionFormatMatch)
|
||||
logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t, compression match required for reusing blobs=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates, opts.requireCompressionFormatMatch)
|
||||
if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates && !ic.requireCompressionFormatMatch {
|
||||
matchedResult, err := ic.compareImageDestinationManifestEqual(ctx, targetInstance)
|
||||
if err != nil {
|
||||
@@ -293,17 +316,17 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary.
|
||||
func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
|
||||
// prepareImageConfigForDest enforces dest.MustMatchRuntimeOS and handles dest.NoteOriginalOCIConfig, if necessary.
|
||||
func prepareImageConfigForDest(ctx context.Context, sys *types.SystemContext, src types.Image, dest private.ImageDestination) error {
|
||||
ociConfig, configErr := src.OCIConfig(ctx)
|
||||
// Do not fail on configErr here, this might be an artifact
|
||||
// and maybe nothing needs this to be a container image and to process the config.
|
||||
|
||||
if dest.MustMatchRuntimeOS() {
|
||||
c, err := src.OCIConfig(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing image configuration: %w", err)
|
||||
}
|
||||
wantedPlatforms, err := platform.WantedPlatforms(sys)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting current platform information %#v: %w", sys, err)
|
||||
if configErr != nil {
|
||||
return fmt.Errorf("parsing image configuration: %w", configErr)
|
||||
}
|
||||
wantedPlatforms := platform.WantedPlatforms(sys)
|
||||
|
||||
options := newOrderedSet()
|
||||
match := false
|
||||
@@ -311,7 +334,7 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
|
||||
// For a transitional period, this might trigger warnings because the Variant
|
||||
// field was added to OCI config only recently. If this turns out to be too noisy,
|
||||
// revert this check to only look for (OS, Architecture).
|
||||
if platform.MatchesPlatform(c.Platform, wantedPlatform) {
|
||||
if platform.MatchesPlatform(ociConfig.Platform, wantedPlatform) {
|
||||
match = true
|
||||
break
|
||||
}
|
||||
@@ -319,9 +342,14 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
|
||||
}
|
||||
if !match {
|
||||
logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q",
|
||||
c.OS, c.Architecture, c.Variant, strings.Join(options.list, ", "))
|
||||
ociConfig.OS, ociConfig.Architecture, ociConfig.Variant, strings.Join(options.list, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
if err := dest.NoteOriginalOCIConfig(ociConfig, configErr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -799,11 +827,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
|
||||
logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
|
||||
return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob), nil
|
||||
}
|
||||
logrus.Debugf("Failed to retrieve partial blob: %v", err)
|
||||
return false, types.BlobInfo{}, nil
|
||||
// On a "partial content not available" error, ignore it and retrieve the whole layer.
|
||||
var perr private.ErrFallbackToOrdinaryLayerDownload
|
||||
if errors.As(err, &perr) {
|
||||
logrus.Debugf("Failed to retrieve partial blob: %v", err)
|
||||
return false, types.BlobInfo{}, nil
|
||||
}
|
||||
return false, types.BlobInfo{}, err
|
||||
}()
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", err
|
||||
return types.BlobInfo{}, "", fmt.Errorf("partial pull of blob %s: %w", srcInfo.Digest, err)
|
||||
}
|
||||
if reused {
|
||||
return blobInfo, cachedDiffID, nil
|
||||
@@ -866,21 +899,33 @@ func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.Reuse
|
||||
// Handling of compression, encryption, and the related MIME types and the like are all the responsibility
|
||||
// of the generic code in this package.
|
||||
res := types.BlobInfo{
|
||||
Digest: reusedBlob.Digest,
|
||||
Size: reusedBlob.Size,
|
||||
URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
|
||||
Annotations: inputInfo.Annotations, // FIXME: This should remove zstd:chunked annotations (but those annotations being left with incorrect values should not break pulls)
|
||||
MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
|
||||
Digest: reusedBlob.Digest,
|
||||
Size: reusedBlob.Size,
|
||||
URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
|
||||
// FIXME: This should remove zstd:chunked annotations IF the original was chunked and the new one isn’t
|
||||
// (but those annotations being left with incorrect values should not break pulls).
|
||||
Annotations: maps.Clone(inputInfo.Annotations),
|
||||
MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
|
||||
CompressionOperation: reusedBlob.CompressionOperation,
|
||||
CompressionAlgorithm: reusedBlob.CompressionAlgorithm,
|
||||
CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset anyway.
|
||||
}
|
||||
// The transport is only expected to fill CompressionOperation and CompressionAlgorithm
|
||||
// if the blob was substituted; otherwise, fill it in based
|
||||
// if the blob was substituted; otherwise, it is optional, and if not set, fill it in based
|
||||
// on what we know from the srcInfos we were given.
|
||||
if reusedBlob.Digest == inputInfo.Digest {
|
||||
res.CompressionOperation = inputInfo.CompressionOperation
|
||||
res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
|
||||
if res.CompressionOperation == types.PreserveOriginal {
|
||||
res.CompressionOperation = inputInfo.CompressionOperation
|
||||
}
|
||||
if res.CompressionAlgorithm == nil {
|
||||
res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
|
||||
}
|
||||
}
|
||||
if len(reusedBlob.CompressionAnnotations) != 0 {
|
||||
if res.Annotations == nil {
|
||||
res.Annotations = map[string]string{}
|
||||
}
|
||||
maps.Copy(res.Annotations, reusedBlob.CompressionAnnotations)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
12 vendor/github.com/containers/image/v5/directory/directory_dest.go generated vendored
@@ -29,6 +29,7 @@ var ErrNotContainerImageDir = errors.New("not a containers image directory, don'
|
||||
type dirImageDestination struct {
|
||||
impl.Compat
|
||||
impl.PropertyMethodsInitialize
|
||||
stubs.IgnoresOriginalOCIConfig
|
||||
stubs.NoPutBlobPartialInitialize
|
||||
stubs.AlwaysSupportsSignatures
|
||||
|
||||
@@ -251,14 +252,11 @@ func (d *dirImageDestination) PutSignaturesWithFormat(ctx context.Context, signa
|
||||
return nil
|
||||
}
|
||||
|
||||
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
|
||||
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
|
||||
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
|
||||
// original manifest list digest, if desired.
|
||||
// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
|
||||
// WARNING: This does not have any transactional semantics:
|
||||
// - Uploaded data MAY be visible to others before Commit() is called
|
||||
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
|
||||
func (d *dirImageDestination) Commit(context.Context, types.UnparsedImage) error {
|
||||
// - Uploaded data MAY be visible to others before CommitWithOptions() is called
|
||||
// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
|
||||
func (d *dirImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
26 vendor/github.com/containers/image/v5/docker/archive/dest.go generated vendored
@@ -34,16 +34,17 @@ func newImageDestination(sys *types.SystemContext, ref archiveReference) (privat
|
||||
writer = w
|
||||
closeWriter = true
|
||||
}
|
||||
tarDest := tarfile.NewDestination(sys, writer.archive, ref.Transport().Name(), ref.ref)
|
||||
if sys != nil && sys.DockerArchiveAdditionalTags != nil {
|
||||
tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
|
||||
}
|
||||
return &archiveImageDestination{
|
||||
Destination: tarDest,
|
||||
d := &archiveImageDestination{
|
||||
ref: ref,
|
||||
writer: writer,
|
||||
closeWriter: closeWriter,
|
||||
}, nil
|
||||
}
|
||||
tarDest := tarfile.NewDestination(sys, writer.archive, ref.Transport().Name(), ref.ref, d.CommitWithOptions)
|
||||
if sys != nil && sys.DockerArchiveAdditionalTags != nil {
|
||||
tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
|
||||
}
|
||||
d.Destination = tarDest
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
|
||||
@@ -60,14 +61,11 @@ func (d *archiveImageDestination) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
|
||||
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
|
||||
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
|
||||
// original manifest list digest, if desired.
|
||||
// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
|
||||
// WARNING: This does not have any transactional semantics:
|
||||
// - Uploaded data MAY be visible to others before Commit() is called
|
||||
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
|
||||
func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
|
||||
// - Uploaded data MAY be visible to others before CommitWithOptions() is called
|
||||
// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
|
||||
func (d *archiveImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
|
||||
d.writer.imageCommitted()
|
||||
if d.closeWriter {
|
||||
// We could do this only in .Close(), but failures in .Close() are much more likely to be
|
||||
|
||||
6 vendor/github.com/containers/image/v5/docker/body_reader.go generated vendored
@@ -6,7 +6,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"math/rand"
|
||||
"math/rand/v2"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
@@ -158,7 +158,7 @@ func (br *bodyReader) Read(p []byte) (int, error) {
|
||||
logrus.Debugf("Error closing blob body: %v", err) // … and ignore err otherwise
|
||||
}
|
||||
br.body = nil
|
||||
time.Sleep(1*time.Second + time.Duration(rand.Intn(100_000))*time.Microsecond) // Some jitter so that a failure blip doesn’t cause a deterministic stampede
|
||||
time.Sleep(1*time.Second + rand.N(100_000*time.Microsecond)) // Some jitter so that a failure blip doesn’t cause a deterministic stampede
|
||||
|
||||
headers := map[string][]string{
|
||||
"Range": {fmt.Sprintf("bytes=%d-", br.offset)},
|
||||
@@ -197,7 +197,7 @@ func (br *bodyReader) Read(p []byte) (int, error) {
|
||||
consumedBody = true
|
||||
br.body = res.Body
|
||||
br.lastRetryOffset = br.offset
|
||||
br.lastRetryTime = time.Time{}
|
||||
br.lastRetryTime = time.Now()
|
||||
return n, nil
|
||||
|
||||
default:
|
||||
|
||||
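The retry path above now draws its jitter from math/rand/v2: rand.N is generic over integer types, so it can produce a random time.Duration directly instead of converting an Intn result. A short illustrative sketch, not vendored code:

```go
package main

import (
	"fmt"
	"math/rand/v2"
	"time"
)

func main() {
	// Old style: pick a microsecond count, then convert it to a Duration.
	oldJitter := time.Duration(rand.IntN(100_000)) * time.Microsecond
	// New style: rand.N works on any integer type, including time.Duration,
	// and returns a value in [0, 100ms).
	newJitter := rand.N(100_000 * time.Microsecond)
	fmt.Println(time.Second+oldJitter, time.Second+newJitter)
}
```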
14 vendor/github.com/containers/image/v5/docker/daemon/client.go generated vendored
@@ -3,6 +3,7 @@ package daemon
|
||||
import (
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/v5/types"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
@@ -47,6 +48,7 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
|
||||
}
|
||||
switch serverURL.Scheme {
|
||||
case "unix": // Nothing
|
||||
case "npipe": // Nothing
|
||||
case "http":
|
||||
hc := httpConfig()
|
||||
opts = append(opts, dockerclient.WithHTTPClient(hc))
|
||||
@@ -80,7 +82,13 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
|
||||
|
||||
return &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
TLSClientConfig: tlsc,
|
||||
// In general we want to follow docker/daemon/client.defaultHTTPClient , as long as it doesn’t affect compatibility.
|
||||
// These idle connection limits really only apply to long-running clients, which is not our case here;
|
||||
// we include the same values purely for symmetry.
|
||||
MaxIdleConns: 6,
|
||||
IdleConnTimeout: 30 * time.Second,
|
||||
},
|
||||
CheckRedirect: dockerclient.CheckRedirect,
|
||||
}, nil
|
||||
@@ -89,7 +97,13 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
|
||||
func httpConfig() *http.Client {
|
||||
return &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
TLSClientConfig: nil,
|
||||
// In general we want to follow docker/daemon/client.defaultHTTPClient , as long as it doesn’t affect compatibility.
|
||||
// These idle connection limits really only apply to long-running clients, which is not our case here;
|
||||
// we include the same values purely for symmetry.
|
||||
MaxIdleConns: 6,
|
||||
IdleConnTimeout: 30 * time.Second,
|
||||
},
|
||||
CheckRedirect: dockerclient.CheckRedirect,
|
||||
}
|
||||
|
||||
20 vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go generated vendored
@@ -56,16 +56,17 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
|
||||
goroutineContext, goroutineCancel := context.WithCancel(ctx)
|
||||
go imageLoadGoroutine(goroutineContext, c, reader, statusChannel)
|
||||
|
||||
return &daemonImageDestination{
|
||||
d := &daemonImageDestination{
|
||||
ref: ref,
|
||||
mustMatchRuntimeOS: mustMatchRuntimeOS,
|
||||
Destination: tarfile.NewDestination(sys, archive, ref.Transport().Name(), namedTaggedRef),
|
||||
archive: archive,
|
||||
goroutineCancel: goroutineCancel,
|
||||
statusChannel: statusChannel,
|
||||
writer: writer,
|
||||
committed: false,
|
||||
}, nil
|
||||
}
|
||||
d.Destination = tarfile.NewDestination(sys, archive, ref.Transport().Name(), namedTaggedRef, d.CommitWithOptions)
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel
|
||||
@@ -146,7 +147,7 @@ func (d *daemonImageDestination) Close() error {
|
||||
// immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked"" without sending
|
||||
// the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all.
|
||||
// Whether that works or not, closing the PipeWriter seems desirable in any case.
|
||||
if err := d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()")); err != nil {
|
||||
if err := d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .CommitWithOptions()")); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -159,14 +160,11 @@ func (d *daemonImageDestination) Reference() types.ImageReference {
|
||||
return d.ref
|
||||
}
|
||||
|
||||
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
|
||||
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
|
||||
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
|
||||
// original manifest list digest, if desired.
|
||||
// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
|
||||
// WARNING: This does not have any transactional semantics:
|
||||
// - Uploaded data MAY be visible to others before Commit() is called
|
||||
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
|
||||
func (d *daemonImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
|
||||
// - Uploaded data MAY be visible to others before CommitWithOptions() is called
|
||||
// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
|
||||
func (d *daemonImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
|
||||
logrus.Debugf("docker-daemon: Closing tar stream")
|
||||
if err := d.archive.Close(); err != nil {
|
||||
return err
|
||||
|
||||
8 vendor/github.com/containers/image/v5/docker/distribution_error.go generated vendored
@@ -24,7 +24,6 @@ import (
|
||||
"slices"
|
||||
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
|
||||
)
|
||||
|
||||
// errNoErrorsInBody is returned when an HTTP response body parses to an empty
|
||||
@@ -114,10 +113,11 @@ func mergeErrors(err1, err2 error) error {
|
||||
// UnexpectedHTTPStatusError returned for response code outside of expected
|
||||
// range.
|
||||
func handleErrorResponse(resp *http.Response) error {
|
||||
if resp.StatusCode >= 400 && resp.StatusCode < 500 {
|
||||
switch {
|
||||
case resp.StatusCode == http.StatusUnauthorized:
|
||||
// Check for OAuth errors within the `WWW-Authenticate` header first
|
||||
// See https://tools.ietf.org/html/rfc6750#section-3
|
||||
for _, c := range dockerChallenge.ResponseChallenges(resp) {
|
||||
for _, c := range parseAuthHeader(resp.Header) {
|
||||
if c.Scheme == "bearer" {
|
||||
var err errcode.Error
|
||||
// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
|
||||
@@ -138,6 +138,8 @@ func handleErrorResponse(resp *http.Response) error {
|
||||
return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
|
||||
}
|
||||
}
|
||||
fallthrough
|
||||
case resp.StatusCode >= 400 && resp.StatusCode < 500:
|
||||
err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
|
||||
if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
|
||||
return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
|
||||
|
||||
42 vendor/github.com/containers/image/v5/docker/docker_client.go generated vendored
@@ -42,7 +42,6 @@ const (
|
||||
dockerRegistry = "registry-1.docker.io"
|
||||
|
||||
resolvedPingV2URL = "%s://%s/v2/"
|
||||
resolvedPingV1URL = "%s://%s/v1/_ping"
|
||||
tagsPath = "/v2/%s/tags/list"
|
||||
manifestPath = "/v2/%s/manifests/%s"
|
||||
blobsPath = "/v2/%s/blobs/%s"
|
||||
@@ -936,34 +935,6 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
|
||||
}
|
||||
if err != nil {
|
||||
err = fmt.Errorf("pinging container registry %s: %w", c.registry, err)
|
||||
if c.sys != nil && c.sys.DockerDisableV1Ping {
|
||||
return err
|
||||
}
|
||||
// best effort to understand if we're talking to a V1 registry
|
||||
pingV1 := func(scheme string) bool {
|
||||
pingURL, err := url.Parse(fmt.Sprintf(resolvedPingV1URL, scheme, c.registry))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, pingURL, nil, nil, -1, noAuth, nil)
|
||||
if err != nil {
|
||||
logrus.Debugf("Ping %s err %s (%#v)", pingURL.Redacted(), err.Error(), err)
|
||||
return false
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
logrus.Debugf("Ping %s status %d", pingURL.Redacted(), resp.StatusCode)
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
isV1 := pingV1("https")
|
||||
if !isV1 && c.tlsClientConfig.InsecureSkipVerify {
|
||||
isV1 = pingV1("http")
|
||||
}
|
||||
if isV1 {
|
||||
err = ErrV1NotSupported
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -1085,6 +1056,15 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty
|
||||
func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) {
|
||||
// Note that this copies all kinds of attachments: attestations, and whatever else is there,
|
||||
// not just signatures. We leave the signature consumers to decide based on the MIME type.
|
||||
|
||||
if err := desc.Digest.Validate(); err != nil { // .Algorithm() might panic without this check
|
||||
return nil, fmt.Errorf("invalid digest %q: %w", desc.Digest.String(), err)
|
||||
}
|
||||
digestAlgorithm := desc.Digest.Algorithm()
|
||||
if !digestAlgorithm.Available() {
|
||||
return nil, fmt.Errorf("invalid digest %q: unsupported digest algorithm %q", desc.Digest.String(), digestAlgorithm.String())
|
||||
}
|
||||
|
||||
reader, _, err := c.getBlob(ctx, ref, manifest.BlobInfoFromOCI1Descriptor(desc), cache)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1094,6 +1074,10 @@ func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerR
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err)
|
||||
}
|
||||
actualDigest := digestAlgorithm.FromBytes(payload)
|
||||
if actualDigest != desc.Digest {
|
||||
return nil, fmt.Errorf("digest mismatch, expected %q, got %q", desc.Digest.String(), actualDigest.String())
|
||||
}
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
|
||||
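The lines added to getOCIDescriptorContents validate the descriptor digest up front and re-check the downloaded payload against it. A hedged sketch of the same verification using github.com/opencontainers/go-digest; the function name is illustrative:

```go
package example

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// verifyPayload returns an error unless payload hashes to expected.
func verifyPayload(expected digest.Digest, payload []byte) error {
	if err := expected.Validate(); err != nil { // also guards the Algorithm() call below
		return fmt.Errorf("invalid digest %q: %w", expected.String(), err)
	}
	alg := expected.Algorithm()
	if !alg.Available() {
		return fmt.Errorf("invalid digest %q: unsupported digest algorithm %q", expected.String(), alg.String())
	}
	if actual := alg.FromBytes(payload); actual != expected {
		return fmt.Errorf("digest mismatch, expected %q, got %q", expected.String(), actual.String())
	}
	return nil
}
```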
6 vendor/github.com/containers/image/v5/docker/docker_image.go generated vendored
@@ -91,6 +91,12 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
|
||||
}
|
||||
for _, tag := range tagsHolder.Tags {
|
||||
if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values
|
||||
// Per https://github.com/containers/skopeo/issues/2409 , Sonatype Nexus 3.58, contrary
|
||||
// to the spec, may include JSON null values in the list; and Go silently parses them as "".
|
||||
if tag == "" {
|
||||
logrus.Debugf("Ignoring invalid empty tag")
|
||||
continue
|
||||
}
|
||||
// Per https://github.com/containers/skopeo/issues/2346 , unknown versions of JFrog Artifactory,
|
||||
// contrary to the tag format specified in
|
||||
// https://github.com/opencontainers/distribution-spec/blob/8a871c8234977df058f1a14e299fe0a673853da2/spec.md?plain=1#L160 ,
|
||||
|
||||
34 vendor/github.com/containers/image/v5/docker/docker_image_dest.go generated vendored
@@ -41,6 +41,7 @@ import (
|
||||
type dockerImageDestination struct {
|
||||
impl.Compat
|
||||
impl.PropertyMethodsInitialize
|
||||
stubs.IgnoresOriginalOCIConfig
|
||||
stubs.NoPutBlobPartialInitialize
|
||||
|
||||
ref dockerReference
|
||||
@@ -332,6 +333,7 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
|
||||
return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
|
||||
}
|
||||
|
||||
originalCandidateKnownToBeMissing := false
|
||||
if impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
|
||||
// First, check whether the blob happens to already exist at the destination.
|
||||
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
|
||||
@@ -341,9 +343,17 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
|
||||
if haveBlob {
|
||||
return true, reusedInfo, nil
|
||||
}
|
||||
originalCandidateKnownToBeMissing = true
|
||||
} else {
|
||||
logrus.Debugf("Ignoring exact blob match, compression %s does not match required %s or MIME types %#v",
|
||||
optionalCompressionName(options.OriginalCompression), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
|
||||
// We can get here with a blob detected to be zstd when the user wants a zstd:chunked.
|
||||
// In that case we keep originalCandidateKnownToBeMissing = false, so that if we find
|
||||
// a BIC entry for this blob, we do use that entry and return a zstd:chunked entry
|
||||
// with the BIC’s annotations.
|
||||
// This is not quite correct, it only works if the BIC also contains an acceptable _location_.
|
||||
// Ideally, we could look up just the compression algorithm/annotations for info.digest,
|
||||
// and use it even if no location candidate exists and the original candidate is present.
|
||||
}
|
||||
|
||||
// Then try reusing blobs from other locations.
|
||||
@@ -387,7 +397,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
|
||||
// for it in the current repo.
|
||||
candidateRepo = reference.TrimNamed(d.ref.ref)
|
||||
}
|
||||
if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
|
||||
if originalCandidateKnownToBeMissing &&
|
||||
candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
|
||||
logrus.Debug("... Already tried the primary destination")
|
||||
continue
|
||||
}
|
||||
@@ -427,10 +438,12 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
|
||||
options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
|
||||
|
||||
return true, private.ReusedBlob{
|
||||
Digest: candidate.Digest,
|
||||
Size: size,
|
||||
CompressionOperation: candidate.CompressionOperation,
|
||||
CompressionAlgorithm: candidate.CompressionAlgorithm}, nil
|
||||
Digest: candidate.Digest,
|
||||
Size: size,
|
||||
CompressionOperation: candidate.CompressionOperation,
|
||||
CompressionAlgorithm: candidate.CompressionAlgorithm,
|
||||
CompressionAnnotations: candidate.CompressionAnnotations,
|
||||
}, nil
|
||||
}
|
||||
|
||||
return false, private.ReusedBlob{}, nil
|
||||
@@ -911,13 +924,10 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context
|
||||
return nil
|
||||
}
|
||||
|
||||
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
|
||||
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
|
||||
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
|
||||
// original manifest list digest, if desired.
|
||||
// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
|
||||
// WARNING: This does not have any transactional semantics:
|
||||
// - Uploaded data MAY be visible to others before Commit() is called
|
||||
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
|
||||
func (d *dockerImageDestination) Commit(context.Context, types.UnparsedImage) error {
|
||||
// - Uploaded data MAY be visible to others before CommitWithOptions() is called
|
||||
// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
|
||||
func (d *dockerImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
77 vendor/github.com/containers/image/v5/docker/docker_image_src.go generated vendored
@@ -116,10 +116,10 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
|
||||
// Don’t just build a string, try to preserve the typed error.
|
||||
primary := &attempts[len(attempts)-1]
|
||||
extras := []string{}
|
||||
for i := 0; i < len(attempts)-1; i++ {
|
||||
for _, attempt := range attempts[:len(attempts)-1] {
|
||||
// This is difficult to fit into a single-line string, when the error can contain arbitrary strings including any metacharacters we decide to use.
|
||||
// The paired [] at least have some chance of being unambiguous.
|
||||
extras = append(extras, fmt.Sprintf("[%s: %v]", attempts[i].ref.String(), attempts[i].err))
|
||||
extras = append(extras, fmt.Sprintf("[%s: %v]", attempt.ref.String(), attempt.err))
|
||||
}
|
||||
return nil, fmt.Errorf("(Mirrors also failed: %s): %s: %w", strings.Join(extras, "\n"), primary.ref.String(), primary.err)
|
||||
}
|
||||
@@ -340,6 +340,10 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
|
||||
}
|
||||
return
|
||||
}
|
||||
if parts >= len(chunks) {
|
||||
errs <- errors.New("too many parts returned by the server")
|
||||
break
|
||||
}
|
||||
s := signalCloseReader{
|
||||
closed: make(chan struct{}),
|
||||
stream: p,
|
||||
@@ -464,26 +468,20 @@ func (s *dockerImageSource) GetSignaturesWithFormat(ctx context.Context, instanc
|
||||
var res []signature.Signature
|
||||
switch {
|
||||
case s.c.supportsSignatures:
|
||||
sigs, err := s.getSignaturesFromAPIExtension(ctx, instanceDigest)
|
||||
if err != nil {
|
||||
if err := s.appendSignaturesFromAPIExtension(ctx, &res, instanceDigest); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res = append(res, sigs...)
|
||||
case s.c.signatureBase != nil:
|
||||
sigs, err := s.getSignaturesFromLookaside(ctx, instanceDigest)
|
||||
if err != nil {
|
||||
if err := s.appendSignaturesFromLookaside(ctx, &res, instanceDigest); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res = append(res, sigs...)
|
||||
default:
|
||||
return nil, errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
|
||||
}
|
||||
|
||||
sigstoreSigs, err := s.getSignaturesFromSigstoreAttachments(ctx, instanceDigest)
|
||||
if err != nil {
|
||||
if err := s.appendSignaturesFromSigstoreAttachments(ctx, &res, instanceDigest); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res = append(res, sigstoreSigs...)
|
||||
return res, nil
|
||||
}
|
||||
|
||||
@@ -505,35 +503,35 @@ func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *
|
||||
return manifest.Digest(s.cachedManifest)
|
||||
}
|
||||
|
||||
// getSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase,
|
||||
// which is not nil.
|
||||
func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
|
||||
// appendSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase,
|
||||
// which is not nil, storing the signatures to *dest.
|
||||
// On error, the contents of *dest are undefined.
|
||||
func (s *dockerImageSource) appendSignaturesFromLookaside(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error {
|
||||
manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
// NOTE: Keep this in sync with docs/signature-protocols.md!
|
||||
signatures := []signature.Signature{}
|
||||
for i := 0; ; i++ {
|
||||
if i >= maxLookasideSignatures {
|
||||
return nil, fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures)
|
||||
return fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures)
|
||||
}
|
||||
|
||||
sigURL, err := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
signature, missing, err := s.getOneSignature(ctx, sigURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if missing {
|
||||
break
|
||||
}
|
||||
signatures = append(signatures, signature)
|
||||
*dest = append(*dest, signature)
|
||||
}
|
||||
return signatures, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// getOneSignature downloads one signature from sigURL, and returns (signature, false, nil)
|
||||
@@ -596,48 +594,51 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL
|
||||
}
|
||||
}
|
||||
|
||||
// getSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension.
|
||||
func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
|
||||
// appendSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension,
|
||||
// storing the signatures to *dest.
|
||||
// On error, the contents of *dest are undefined.
|
||||
func (s *dockerImageSource) appendSignaturesFromAPIExtension(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error {
|
||||
manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
parsedBody, err := s.c.getExtensionsSignatures(ctx, s.physicalRef, manifestDigest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
var sigs []signature.Signature
|
||||
for _, sig := range parsedBody.Signatures {
|
||||
if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic {
|
||||
sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content))
|
||||
*dest = append(*dest, signature.SimpleSigningFromBlob(sig.Content))
|
||||
}
|
||||
}
|
||||
return sigs, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
|
||||
// appendSignaturesFromSigstoreAttachments implements GetSignaturesWithFormat() using the sigstore tag convention,
|
||||
// storing the signatures to *dest.
|
||||
// On error, the contents of *dest are undefined.
|
||||
func (s *dockerImageSource) appendSignaturesFromSigstoreAttachments(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error {
|
||||
if !s.c.useSigstoreAttachments {
|
||||
logrus.Debugf("Not looking for sigstore attachments: disabled by configuration")
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
ociManifest, err := s.c.getSigstoreAttachmentManifest(ctx, s.physicalRef, manifestDigest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if ociManifest == nil {
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
logrus.Debugf("Found a sigstore attachment manifest with %d layers", len(ociManifest.Layers))
|
||||
res := []signature.Signature{}
|
||||
for layerIndex, layer := range ociManifest.Layers {
|
||||
// Note that this copies all kinds of attachments: attestations, and whatever else is there,
|
||||
// not just signatures. We leave the signature consumers to decide based on the MIME type.
|
||||
@@ -648,11 +649,11 @@ func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Con
|
||||
payload, err := s.c.getOCIDescriptorContents(ctx, s.physicalRef, layer, iolimits.MaxSignatureBodySize,
|
||||
none.NoCache)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
res = append(res, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations))
|
||||
*dest = append(*dest, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations))
|
||||
}
|
||||
return res, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// deleteImage deletes the named image from the registry, if supported.
|
||||
@@ -830,7 +831,7 @@ func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint)
|
||||
handleBufferedNetworkReader(&br)
|
||||
}()
|
||||
|
||||
for i := uint(0); i < nBuffers; i++ {
|
||||
for range nBuffers {
|
||||
b := bufferedNetworkReaderBuffer{
|
||||
data: make([]byte, bufferSize),
|
||||
}
|
||||
|
||||
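Editor's note: the hunks above replace getSignaturesFrom* helpers that returned slices with appendSignaturesFrom* helpers that write into a caller-owned slice through *dest. A minimal, self-contained sketch of that pattern follows; the Signature type and both helpers are toy stand-ins, not the upstream API:

package main

import "fmt"

// Signature is a stand-in for the signature.Signature interface used upstream.
type Signature string

// appendFromLookaside mimics the refactored helpers: it appends into the
// caller-owned slice via *dest and returns only an error, so the caller no
// longer collects and concatenates intermediate slices.
func appendFromLookaside(dest *[]Signature) error {
	*dest = append(*dest, "lookaside-sig")
	return nil
}

func appendFromSigstore(dest *[]Signature) error {
	*dest = append(*dest, "sigstore-sig")
	return nil
}

func main() {
	var res []Signature
	if err := appendFromLookaside(&res); err != nil {
		panic(err)
	}
	if err := appendFromSigstore(&res); err != nil {
		panic(err)
	}
	fmt.Println(res) // [lookaside-sig sigstore-sig]
}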
vendor/github.com/containers/image/v5/docker/errors.go  (generated, vendored; 1 line changed)
@@ -12,6 +12,7 @@ import (
var (
// ErrV1NotSupported is returned when we're trying to talk to a
// docker V1 registry.
// Deprecated: The V1 container registry detection is no longer performed, so this error is never returned.
ErrV1NotSupported = errors.New("can't talk to a V1 container registry")
// ErrTooManyRequests is returned when the status code returned is 429
ErrTooManyRequests = errors.New("too many requests to registry")
vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go  (generated, vendored; 29 lines changed)
@@ -20,22 +20,26 @@ import (
"github.com/sirupsen/logrus"
)

// Destination is a partial implementation of private.ImageDestination for writing to an io.Writer.
// Destination is a partial implementation of private.ImageDestination for writing to a Writer.
type Destination struct {
impl.Compat
impl.PropertyMethodsInitialize
stubs.IgnoresOriginalOCIConfig
stubs.NoPutBlobPartialInitialize
stubs.NoSignaturesInitialize

archive *Writer
repoTags []reference.NamedTagged
archive *Writer
commitWithOptions func(ctx context.Context, options private.CommitOptions) error
repoTags []reference.NamedTagged
// Other state.
config []byte
sysCtx *types.SystemContext
}

// NewDestination returns a tarfile.Destination adding images to the specified Writer.
func NewDestination(sys *types.SystemContext, archive *Writer, transportName string, ref reference.NamedTagged) *Destination {
// commitWithOptions implements ImageDestination.CommitWithOptions.
func NewDestination(sys *types.SystemContext, archive *Writer, transportName string, ref reference.NamedTagged,
commitWithOptions func(ctx context.Context, options private.CommitOptions) error) *Destination {
repoTags := []reference.NamedTagged{}
if ref != nil {
repoTags = append(repoTags, ref)
@@ -57,9 +61,10 @@ func NewDestination(sys *types.SystemContext, archive *Writer, transportName str
NoPutBlobPartialInitialize: stubs.NoPutBlobPartialRaw(transportName),
NoSignaturesInitialize: stubs.NoSignatures("Storing signatures for docker tar files is not supported"),

archive: archive,
repoTags: repoTags,
sysCtx: sys,
archive: archive,
commitWithOptions: commitWithOptions,
repoTags: repoTags,
sysCtx: sys,
}
dest.Compat = impl.AddCompat(dest)
return dest
@@ -179,3 +184,13 @@ func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest

return d.archive.ensureManifestItemLocked(man.LayersDescriptors, man.ConfigDescriptor.Digest, d.repoTags)
}

// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before CommitWithOptions() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
func (d *Destination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
// This indirection exists because impl.Compat expects all ImageDestinationInternalOnly methods
// to be implemented in one place.
return d.commitWithOptions(ctx, options)
}
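Editor's note: the tarfile change above injects the transport-specific commit behaviour as a function value at construction time and has CommitWithOptions delegate to it. A minimal, self-contained sketch of that callback-injection pattern follows (the destination/commitOptions types and newDestination constructor are hypothetical, standing in for the upstream ones):

package main

import (
	"context"
	"fmt"
)

// commitOptions is a stand-in for private.CommitOptions.
type commitOptions struct{}

// destination stores the injected commit callback and delegates to it.
type destination struct {
	commitWithOptions func(ctx context.Context, options commitOptions) error
}

func newDestination(commit func(ctx context.Context, options commitOptions) error) *destination {
	return &destination{commitWithOptions: commit}
}

func (d *destination) CommitWithOptions(ctx context.Context, options commitOptions) error {
	return d.commitWithOptions(ctx, options)
}

func main() {
	d := newDestination(func(ctx context.Context, _ commitOptions) error {
		fmt.Println("flushing tar stream and closing the archive")
		return nil
	})
	if err := d.CommitWithOptions(context.Background(), commitOptions{}); err != nil {
		panic(err)
	}
}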
vendor/github.com/containers/image/v5/docker/registries_d.go  (generated, vendored; 6 lines changed)
@@ -3,6 +3,7 @@ package docker
import (
"errors"
"fmt"
"io/fs"
"net/url"
"os"
"path"
@@ -129,6 +130,11 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
configPath := filepath.Join(dirPath, configName)
configBytes, err := os.ReadFile(configPath)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
// file must have been removed between the directory listing
// and the open call, ignore that as it is a expected race
continue
}
return nil, err
}
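Editor's note: the hunk above tolerates the race in which a file disappears between the directory listing and the os.ReadFile call, by checking errors.Is(err, fs.ErrNotExist). Here is a small, self-contained sketch of the same idea (readConfigDir is a hypothetical helper, not the upstream loadAndMergeConfig):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

// readConfigDir reads every regular file in dirPath, skipping files that
// vanish between the directory listing and the read.
func readConfigDir(dirPath string) (map[string][]byte, error) {
	entries, err := os.ReadDir(dirPath)
	if err != nil {
		return nil, err
	}
	configs := map[string][]byte{}
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		data, err := os.ReadFile(filepath.Join(dirPath, entry.Name()))
		if err != nil {
			if errors.Is(err, fs.ErrNotExist) {
				continue // removed concurrently; not an error
			}
			return nil, err
		}
		configs[entry.Name()] = data
	}
	return configs, nil
}

func main() {
	configs, err := readConfigDir(os.TempDir())
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d files\n", len(configs))
}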
vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go  (generated, vendored; 9 lines changed)
@@ -27,7 +27,14 @@ func (bic *v1OnlyBlobInfoCache) Open() {
func (bic *v1OnlyBlobInfoCache) Close() {
}

func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
func (bic *v1OnlyBlobInfoCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
return ""
}

func (bic *v1OnlyBlobInfoCache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
}

func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData) {
}

func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2 {
vendor/github.com/containers/image/v5/internal/blobinfocache/types.go  (generated, vendored; 42 lines changed)
@@ -26,19 +26,40 @@ type BlobInfoCache2 interface {
// Close destroys state created by Open().
Close()

// RecordDigestCompressorName records a compressor for the blob with the specified digest,
// or Uncompressed or UnknownCompression.
// WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a
// digest just because some remote author claims so (e.g. because a manifest says so);
// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest.
// Returns "" if the uncompressed digest is unknown.
UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest
// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest)

// RecordDigestCompressorData records data for the blob with the specified digest.
// WARNING: Only call this with LOCALLY VERIFIED data:
// - don’t record a compressor for a digest just because some remote author claims so
// (e.g. because a manifest says so);
// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
// and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
// in a manifest)
// otherwise the cache could be poisoned and cause us to make incorrect edits to type
// information in a manifest.
RecordDigestCompressorName(anyDigest digest.Digest, compressorName string)
RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData)
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2
}

// DigestCompressorData is information known about how a blob is compressed.
// (This is worded generically, but basically targeted at the zstd / zstd:chunked situation.)
type DigestCompressorData struct {
BaseVariantCompressor string // A compressor’s base variant name, or Uncompressed or UnknownCompression.
// The following fields are only valid if the base variant is neither Uncompressed nor UnknownCompression:
SpecificVariantCompressor string // A non-base variant compressor (or UnknownCompression if the true format is just the base variant)
SpecificVariantAnnotations map[string]string // Annotations required to benefit from the base variant.
}

// CandidateLocations2Options are used in CandidateLocations2.
type CandidateLocations2Options struct {
// If !CanSubstitute, the returned candidates will match the submitted digest exactly; if
@@ -51,9 +72,10 @@ type CandidateLocations2Options struct {

// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
type BICReplacementCandidate2 struct {
Digest digest.Digest
CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
UnknownLocation bool // is true when `Location` for this blob is not set
Location types.BICLocationReference // not set if UnknownLocation is set to `true`
Digest digest.Digest
CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
CompressionAnnotations map[string]string // If necessary, annotations necessary to use CompressionAlgorithm
UnknownLocation bool // is true when `Location` for this blob is not set
Location types.BICLocationReference // not set if UnknownLocation is set to `true`
}
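Editor's note: BlobInfoCache2 above gains a TOC-digest-to-uncompressed-digest mapping. The following is a toy, in-memory analogue of the UncompressedDigestForTOC / RecordTOCUncompressedPair pair, only to illustrate the intended semantics (it is not the upstream implementation):

package main

import (
	"fmt"
	"sync"

	"github.com/opencontainers/go-digest"
)

// tocCache maps a TOC digest to the corresponding uncompressed digest.
type tocCache struct {
	mu   sync.Mutex
	tocs map[digest.Digest]digest.Digest
}

func newTOCCache() *tocCache {
	return &tocCache{tocs: map[digest.Digest]digest.Digest{}}
}

// RecordTOCUncompressedPair should only be called with locally verified data,
// mirroring the WARNING in the interface comment above.
func (c *tocCache) RecordTOCUncompressedPair(tocDigest, uncompressed digest.Digest) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.tocs[tocDigest] = uncompressed
}

// UncompressedDigestForTOC returns "" when the pairing is unknown.
func (c *tocCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.tocs[tocDigest]
}

func main() {
	c := newTOCCache()
	toc := digest.FromString("example TOC")
	c.RecordTOCUncompressedPair(toc, digest.FromString("example uncompressed tar"))
	fmt.Println(c.UncompressedDigestForTOC(toc))
	fmt.Println(c.UncompressedDigestForTOC(digest.FromString("unknown")) == "")
}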
vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go  (generated, vendored; 13 lines changed)
@@ -99,3 +99,16 @@ func (c *Compat) PutSignatures(ctx context.Context, signatures [][]byte, instanc
}
return c.dest.PutSignaturesWithFormat(ctx, withFormat, instanceDigest)
}

// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
// original manifest list digest, if desired.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (c *Compat) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
return c.dest.CommitWithOptions(ctx, private.CommitOptions{
UnparsedToplevel: unparsedToplevel,
})
}
vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go  (generated, vendored, new file; 16 lines changed)
@@ -0,0 +1,16 @@
package stubs

import (
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// IgnoresOriginalOCIConfig implements NoteOriginalOCIConfig() that does nothing.
type IgnoresOriginalOCIConfig struct{}

// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
// The destination can use it in its TryReusingBlob/PutBlob implementations
// (otherwise it only obtains the final config after all layers are written).
func (stub IgnoresOriginalOCIConfig) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
return nil
}
@@ -36,8 +36,9 @@ func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool {
// PutBlobPartial attempts to create a blob using the data that is already present
// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
// Even if SupportsPutBlobPartial() returns true, the call can fail.
// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
// The fallback _must not_ be done otherwise.
func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
}
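Editor's note: the new IgnoresOriginalOCIConfig stub above follows the existing pattern of embedding zero-size, do-nothing implementations so that concrete destinations only have to implement the methods they care about. A minimal, self-contained sketch of that embedding pattern (the interface and both types here are toy stand-ins, not the upstream private.ImageDestination):

package main

import "fmt"

// bigInterface stands in for a wide internal interface; only two methods are modeled.
type bigInterface interface {
	NoteOriginalOCIConfig() error
	PutManifest(manifest []byte) error
}

// ignoresOriginalOCIConfig is a zero-size stub providing a do-nothing implementation.
type ignoresOriginalOCIConfig struct{}

func (ignoresOriginalOCIConfig) NoteOriginalOCIConfig() error { return nil }

// tarDestination embeds the stub and only implements what it actually needs.
type tarDestination struct {
	ignoresOriginalOCIConfig
}

func (d *tarDestination) PutManifest(manifest []byte) error {
	fmt.Printf("stored %d-byte manifest\n", len(manifest))
	return nil
}

func main() {
	var dest bigInterface = &tarDestination{}
	_ = dest.NoteOriginalOCIConfig() // satisfied by the embedded stub
	_ = dest.PutManifest([]byte(`{"schemaVersion":2}`))
}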
vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go  (generated, vendored; 12 lines changed)
@@ -14,6 +14,7 @@ import (
// wrapped provides the private.ImageDestination operations
// for a destination that only implements types.ImageDestination
type wrapped struct {
stubs.IgnoresOriginalOCIConfig
stubs.NoPutBlobPartialInitialize

types.ImageDestination
@@ -76,6 +77,9 @@ func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.Blob
Size: blob.Size,
CompressionOperation: blob.CompressionOperation,
CompressionAlgorithm: blob.CompressionAlgorithm,
// CompressionAnnotations could be set to blob.Annotations, but that may contain unrelated
// annotations, and we didn’t use the blob.Annotations field previously, so we’ll
// continue not using it.
}, nil
}

@@ -94,3 +98,11 @@ func (w *wrapped) PutSignaturesWithFormat(ctx context.Context, signatures []sign
}
return w.PutSignatures(ctx, simpleSigs, instanceDigest)
}

// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before CommitWithOptions() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
func (w *wrapped) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
return w.Commit(ctx, options.UnparsedToplevel)
}
vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go  (generated, vendored; 37 lines changed)
@@ -74,20 +74,20 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat

// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
editInstances := []ListEdit{}
for i, instance := range updates {
editInstances = append(editInstances, ListEdit{
UpdateOldDigest: index.Manifests[i].Digest,
UpdateOldDigest: list.Manifests[i].Digest,
UpdateDigest: instance.Digest,
UpdateSize: instance.Size,
UpdateMediaType: instance.MediaType,
ListOperation: ListOpUpdate})
}
return index.editInstances(editInstances)
return list.editInstances(editInstances)
}

func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
func (list *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
addedEntries := []Schema2ManifestDescriptor{}
for i, editInstance := range editInstances {
switch editInstance.ListOperation {
@@ -98,21 +98,21 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
if err := editInstance.UpdateDigest.Validate(); err != nil {
return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err)
}
targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool {
targetIndex := slices.IndexFunc(list.Manifests, func(m Schema2ManifestDescriptor) bool {
return m.Digest == editInstance.UpdateOldDigest
})
if targetIndex == -1 {
return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest)
}
index.Manifests[targetIndex].Digest = editInstance.UpdateDigest
list.Manifests[targetIndex].Digest = editInstance.UpdateDigest
if editInstance.UpdateSize < 0 {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize)
}
index.Manifests[targetIndex].Size = editInstance.UpdateSize
list.Manifests[targetIndex].Size = editInstance.UpdateSize
if editInstance.UpdateMediaType == "" {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType)
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), list.Manifests[i].MediaType)
}
index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
list.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
case ListOpAdd:
if editInstance.AddPlatform == nil {
// Should we create a struct with empty fields instead?
@@ -135,13 +135,13 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
if len(addedEntries) != 0 {
// slices.Clone() here to ensure a private backing array;
// an external caller could have manually created Schema2ListPublic with a slice with extra capacity.
index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
list.Manifests = append(slices.Clone(list.Manifests), addedEntries...)
}
return nil
}

func (index *Schema2List) EditInstances(editInstances []ListEdit) error {
return index.editInstances(editInstances)
func (list *Schema2List) EditInstances(editInstances []ListEdit) error {
return list.editInstances(editInstances)
}

func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
@@ -152,10 +152,7 @@ func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemCont
// ChooseInstance parses blob as a schema2 manifest list, and returns the digest
// of the image which is appropriate for the current environment.
func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
}
wantedPlatforms := platform.WantedPlatforms(ctx)
for _, wantedPlatform := range wantedPlatforms {
for _, d := range list.Manifests {
imagePlatform := ociPlatformFromSchema2PlatformSpec(d.Platform)
@@ -283,12 +280,12 @@ func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List {
return &Schema2List{*public}
}

func (index *Schema2List) CloneInternal() List {
return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic))
func (list *Schema2List) CloneInternal() List {
return schema2ListFromPublic(Schema2ListPublicClone(&list.Schema2ListPublic))
}

func (index *Schema2List) Clone() ListPublic {
return index.CloneInternal()
func (list *Schema2List) Clone() ListPublic {
return list.CloneInternal()
}

// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
vendor/github.com/containers/image/v5/internal/manifest/manifest.go  (generated, vendored; 5 lines changed)
@@ -205,11 +205,6 @@ type ReuseConditions struct {
// (which can be nil to represent uncompressed or unknown) matches reuseConditions.
func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool {
if c.RequiredCompression != nil {
if c.RequiredCompression.Name() == compressiontypes.ZstdChunkedAlgorithmName {
// HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
// The caller must re-compress to build those annotations.
return false
}
if candidateCompression == nil ||
(c.RequiredCompression.Name() != candidateCompression.Name() && c.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
return false
vendor/github.com/containers/image/v5/internal/manifest/oci_index.go  (generated, vendored; 5 lines changed)
@@ -236,10 +236,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi
if preferGzip == types.OptionalBoolTrue {
didPreferGzip = true
}
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
}
wantedPlatforms := platform.WantedPlatforms(ctx)
var bestMatch *instanceCandidate
bestMatch = nil
for manifestIndex, d := range index.Manifests {
vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go  (generated, vendored; 4 lines changed)
@@ -153,7 +153,7 @@ var compatibility = map[string][]string{
// WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by user,
// the most compatible platform is first.
// If some option (arch, os, variant) is not present, a value from current platform is detected.
func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
func WantedPlatforms(ctx *types.SystemContext) []imgspecv1.Platform {
// Note that this does not use Platform.OSFeatures and Platform.OSVersion at all.
// The fields are not specified by the OCI specification, as of version 1.1, usefully enough
// to be interoperable, anyway.
@@ -211,7 +211,7 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
Variant: v,
})
}
return res, nil
return res
}

// MatchesPlatform returns true if a platform descriptor from a multi-arch image matches
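Editor's note: WantedPlatforms above drops its error return, which is why the manifest-list callers earlier in this diff lose an error-handling branch. A toy, self-contained analogue of the simplified caller side (wantedPlatforms here is a hypothetical stand-in, not the upstream function):

package main

import (
	"fmt"
	"runtime"

	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// wantedPlatforms can always produce an answer, so it returns no error.
func wantedPlatforms() []imgspecv1.Platform {
	return []imgspecv1.Platform{
		{OS: runtime.GOOS, Architecture: runtime.GOARCH},
		{OS: "linux", Architecture: "amd64"}, // an illustrative compatibility fallback
	}
}

func main() {
	for _, p := range wantedPlatforms() {
		fmt.Printf("%s/%s\n", p.OS, p.Architecture)
	}
}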
vendor/github.com/containers/image/v5/internal/private/private.go  (generated, vendored; 55 lines changed)
@@ -10,6 +10,7 @@ import (
compression "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// ImageSourceInternalOnly is the part of private.ImageSource that is not
@@ -41,6 +42,12 @@ type ImageDestinationInternalOnly interface {
// FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures
// on unsupported formats.

// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
// The destination can use it in its TryReusingBlob/PutBlob implementations
// (otherwise it only obtains the final config after all layers are written).
NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error

// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
@@ -53,8 +60,9 @@ type ImageDestinationInternalOnly interface {
// PutBlobPartial attempts to create a blob using the data that is already present
// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
// Even if SupportsPutBlobPartial() returns true, the call can fail.
// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
// The fallback _must not_ be done otherwise.
PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, options PutBlobPartialOptions) (UploadedBlob, error)

// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
@@ -69,6 +77,12 @@ type ImageDestinationInternalOnly interface {
// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
// MUST be called after PutManifest (signatures may reference manifest contents).
PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error

// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before CommitWithOptions() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
CommitWithOptions(ctx context.Context, options CommitOptions) error
}

// ImageDestination is an internal extension to the types.ImageDestination
@@ -134,12 +148,30 @@ type ReusedBlob struct {
Size int64 // Must be provided
// The following compression fields should be set when the reuse substitutes
// a differently-compressed blob.
// They may be set also to change from a base variant to a specific variant of an algorithm.
CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A
CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A

// Annotations that should be added, for CompressionAlgorithm. Note that they might need to be
// added even if the digest doesn’t change (if we found the annotations in a cache).
CompressionAnnotations map[string]string

MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes.
}

// CommitOptions are used in CommitWithOptions
type CommitOptions struct {
// UnparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
// original manifest list digest, if desired.
UnparsedToplevel types.UnparsedImage
// ReportResolvedReference, if set, asks the transport to store a “resolved” (more detailed) reference to the created image
// into the value this option points to.
// What “resolved” means is transport-specific.
// Transports which don’t support reporting resolved references can ignore the field; the generic copy code writes "nil" into the value.
ReportResolvedReference *types.ImageReference
}

// ImageSourceChunk is a portion of a blob.
// This API is experimental and can be changed without bumping the major version number.
type ImageSourceChunk struct {
@@ -178,3 +210,22 @@ type UnparsedImage interface {
// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
UntrustedSignatures(ctx context.Context) ([]signature.Signature, error)
}

// ErrFallbackToOrdinaryLayerDownload is a custom error type returned by PutBlobPartial.
// It suggests to the caller that a fallback mechanism can be used instead of a hard failure;
// otherwise the caller of PutBlobPartial _must not_ fall back to PutBlob.
type ErrFallbackToOrdinaryLayerDownload struct {
err error
}

func (c ErrFallbackToOrdinaryLayerDownload) Error() string {
return c.err.Error()
}

func (c ErrFallbackToOrdinaryLayerDownload) Unwrap() error {
return c.err
}

func NewErrFallbackToOrdinaryLayerDownload(err error) error {
return ErrFallbackToOrdinaryLayerDownload{err: err}
}
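Editor's note: the new ErrFallbackToOrdinaryLayerDownload type above is a wrapper that marks an error as "safe to fall back from" while keeping the cause reachable via Unwrap. A minimal, self-contained sketch of how a caller might use that pattern follows (the errFallback type and putBlob/putBlobPartial functions are toy stand-ins, not the upstream API):

package main

import (
	"errors"
	"fmt"
)

// errFallback mirrors the shape of the wrapper: Error() and Unwrap() only.
type errFallback struct{ err error }

func (e errFallback) Error() string { return e.err.Error() }
func (e errFallback) Unwrap() error { return e.err }

// putBlobPartial simulates a chunked-reuse attempt that cannot work and asks
// the caller to download the layer the ordinary way instead.
func putBlobPartial() error {
	return errFallback{err: errors.New("server does not support range requests")}
}

func putBlob() error {
	fmt.Println("downloading the full layer instead")
	return nil
}

func main() {
	err := putBlobPartial()
	var fallback errFallback
	if errors.As(err, &fallback) {
		// Only this error type licenses the fallback; any other error is fatal.
		if err := putBlob(); err != nil {
			panic(err)
		}
		return
	}
	if err != nil {
		panic(err)
	}
}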
vendor/github.com/containers/image/v5/internal/reflink/reflink_linux.go  (generated, vendored, new file; 22 lines changed)
@@ -0,0 +1,22 @@
//go:build linux

package reflink

import (
"io"
"os"

"golang.org/x/sys/unix"
)

// LinkOrCopy attempts to reflink the source to the destination fd.
// If reflinking fails or is unsupported, it falls back to io.Copy().
func LinkOrCopy(src, dst *os.File) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, dst.Fd(), unix.FICLONE, src.Fd())
if errno == 0 {
return nil
}

_, err := io.Copy(dst, src)
return err
}
vendor/github.com/containers/image/v5/internal/reflink/reflink_unsupported.go  (generated, vendored, new file; 15 lines changed)
@@ -0,0 +1,15 @@
//go:build !linux

package reflink

import (
"io"
"os"
)

// LinkOrCopy attempts to reflink the source to the destination fd.
// If reflinking fails or is unsupported, it falls back to io.Copy().
func LinkOrCopy(src, dst *os.File) error {
_, err := io.Copy(dst, src)
return err
}
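Editor's note: the two new files above add a reflink helper: on Linux it first tries the FICLONE ioctl (sharing extents on filesystems such as Btrfs or XFS) and only then falls back to a byte copy; elsewhere it always copies. The following is a portable, self-contained usage sketch; linkOrCopy here is a stand-in that only implements the fallback path, since the internal reflink package is not importable from outside containers/image:

package main

import (
	"fmt"
	"io"
	"os"
)

// linkOrCopy stands in for the internal helper; this sketch always copies.
func linkOrCopy(src, dst *os.File) error {
	_, err := io.Copy(dst, src)
	return err
}

func main() {
	src, err := os.CreateTemp("", "reflink-src-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(src.Name())
	if _, err := src.WriteString("hello, layer data"); err != nil {
		panic(err)
	}
	if _, err := src.Seek(0, io.SeekStart); err != nil {
		panic(err)
	}

	dst, err := os.CreateTemp("", "reflink-dst-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(dst.Name())

	if err := linkOrCopy(src, dst); err != nil {
		panic(err)
	}
	fmt.Println("copied (or reflinked) successfully")
}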
vendor/github.com/containers/image/v5/manifest/docker_schema1.go  (generated, vendored; 6 lines changed)
@@ -318,20 +318,20 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
// Add the history and rootfs information.
rootfs, err := json.Marshal(rootFS)
if err != nil {
return nil, fmt.Errorf("error encoding rootfs information %#v: %v", rootFS, err)
return nil, fmt.Errorf("error encoding rootfs information %#v: %w", rootFS, err)
}
rawRootfs := json.RawMessage(rootfs)
raw["rootfs"] = &rawRootfs
history, err := json.Marshal(convertedHistory)
if err != nil {
return nil, fmt.Errorf("error encoding history information %#v: %v", convertedHistory, err)
return nil, fmt.Errorf("error encoding history information %#v: %w", convertedHistory, err)
}
rawHistory := json.RawMessage(history)
raw["history"] = &rawHistory
// Encode the result.
config, err = json.Marshal(raw)
if err != nil {
return nil, fmt.Errorf("error re-encoding compat image config %#v: %v", s1, err)
return nil, fmt.Errorf("error re-encoding compat image config %#v: %w", s1, err)
}
return config, nil
}
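Editor's note: the hunk above switches fmt.Errorf from %v to %w. The practical difference is that %w keeps the wrapped cause inspectable with errors.Is/errors.As, while %v flattens it into a string. A short, self-contained demonstration:

package main

import (
	"errors"
	"fmt"
	"io/fs"
)

func main() {
	cause := fs.ErrNotExist

	// %v flattens the cause into a plain string; the chain is lost.
	flat := fmt.Errorf("error encoding rootfs information: %v", cause)
	fmt.Println(errors.Is(flat, fs.ErrNotExist)) // false

	// %w (as in the hunk above) keeps the cause reachable by callers.
	wrapped := fmt.Errorf("error encoding rootfs information: %w", cause)
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true
}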
vendor/github.com/containers/image/v5/manifest/docker_schema2.go  (generated, vendored; 2 lines changed)
@@ -202,7 +202,7 @@ func (m *Schema2) ConfigInfo() types.BlobInfo {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *Schema2) LayerInfos() []LayerInfo {
blobs := []LayerInfo{}
blobs := make([]LayerInfo, 0, len(m.LayersDescriptors))
for _, layer := range m.LayersDescriptors {
blobs = append(blobs, LayerInfo{
BlobInfo: BlobInfoFromSchema2Descriptor(layer),
vendor/github.com/containers/image/v5/manifest/oci.go  (generated, vendored; 4 lines changed)
@@ -60,7 +60,7 @@ func OCI1FromManifest(manifestBlob []byte) (*OCI1, error) {
if err := json.Unmarshal(manifestBlob, &oci1); err != nil {
return nil, err
}
if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageIndex,
if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageManifest,
manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
return nil, err
}
@@ -95,7 +95,7 @@ func (m *OCI1) ConfigInfo() types.BlobInfo {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *OCI1) LayerInfos() []LayerInfo {
blobs := []LayerInfo{}
blobs := make([]LayerInfo, 0, len(m.Layers))
for _, layer := range m.Layers {
blobs = append(blobs, LayerInfo{
BlobInfo: BlobInfoFromOCI1Descriptor(layer),
vendor/github.com/containers/image/v5/oci/archive/oci_dest.go  (generated, vendored; 27 lines changed)
@@ -14,6 +14,7 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)

@@ -103,6 +104,14 @@ func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool {
return d.unpackedDest.SupportsPutBlobPartial()
}

// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
// The destination can use it in its TryReusingBlob/PutBlob implementations
// (otherwise it only obtains the final config after all layers are written).
func (d *ociArchiveImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
return d.unpackedDest.NoteOriginalOCIConfig(ociConfig, configErr)
}

// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
@@ -117,8 +126,9 @@ func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, str
// PutBlobPartial attempts to create a blob using the data that is already present
// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
// Even if SupportsPutBlobPartial() returns true, the call can fail.
// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
// The fallback _must not_ be done otherwise.
func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, options)
}
@@ -149,13 +159,12 @@ func (d *ociArchiveImageDestination) PutSignaturesWithFormat(ctx context.Context
return d.unpackedDest.PutSignaturesWithFormat(ctx, signatures, instanceDigest)
}

// Commit marks the process of storing the image as successful and asks for the image to be persisted
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
// original manifest list digest, if desired.
// after the directory is made, it is tarred up into a file and the directory is deleted
func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
if err := d.unpackedDest.Commit(ctx, unparsedToplevel); err != nil {
// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before CommitWithOptions() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
func (d *ociArchiveImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
if err := d.unpackedDest.CommitWithOptions(ctx, options); err != nil {
return fmt.Errorf("storing image %q: %w", d.ref.image, err)
}
Some files were not shown because too many files have changed in this diff.