Mirror of https://github.com/containers/skopeo.git (synced 2026-01-30 13:58:48 +00:00)

Compare commits: v1.17.0 ... release-1. (73 commits)
SHA1s of the 73 commits:

fb0302daea, f4c9af64cb, 45c0a85e35, bfd0850f06, 582ee15bbb,
89cbcbf9c6, 64361bde06, 955233070b, bd1ac4668f, 2b3701b7be,
59ec554738, 83d0e76b83, 04d65888a3, 958d586c45, 137a912c55,
dbf619c027, 52895bc6cd, d18677e479, b78a415987, 0eb4ad2f54,
5eba061489, 9764c99dbd, 54d235403a, a81cb65fac, 7d6169d219,
85fa4dff42, 8380f284c7, ed0efc6932, 3315da48b5, ab53f64473,
c737e5daea, 653db36664, 0633de6350, 6483de4894, 3d2c80f58f,
b5a13bccfd, eae9e8862c, 392aa3f70f, b68afb1a73, 0eb0ac3707,
2a47dff7e1, 631555fc83, 83efeea4f3, da5c8d6e7b, deda96636b,
17ea741413, fb777d3906, bf5987896a, 568d5d1c6f, f333a897f6,
1866ecbda2, f7801f77cc, b116c5bdce, e288827449, 27baed919d,
4863e05f5f, a71a8b4c48, 4541d649a6, 96f3804385, be26f2eb2f,
e9755957df, 74488c4b86, 2a3c8ee5b1, 87f199fbaf, f423f01d1b,
c5eaf49918, 186e9b4f0b, 8896960688, f818827f6d, 0c25d2c9fb,
bae8ccd7fb, 181429435e, 293ac065b7
.cirrus.yml (deleted; 259 lines)

```yaml
@@ -1,259 +0,0 @@
---

# Main collection of env. vars to set for all tasks and scripts.
env:
    ####
    #### Global variables used for all tasks
    ####
    # Name of the ultimate destination branch for this CI run, PR or post-merge.
    DEST_BRANCH: "main"
    # Overrides default location (/tmp/cirrus) for repo clone
    GOPATH: &gopath "/var/tmp/go"
    GOBIN: "${GOPATH}/bin"
    GOCACHE: "${GOPATH}/cache"
    GOSRC: &gosrc "/var/tmp/go/src/github.com/containers/skopeo"
    # Required for consistency with containers/image CI
    SKOPEO_PATH: *gosrc
    CIRRUS_WORKING_DIR: *gosrc
    # The default is 'sh' if unspecified
    CIRRUS_SHELL: "/bin/bash"
    # Save a little typing (path relative to $CIRRUS_WORKING_DIR)
    SCRIPT_BASE: "./contrib/cirrus"

    # Google-cloud VM Images
    IMAGE_SUFFIX: "c20241107t210000z-f41f40d13"
    FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"

    # Container FQIN's
    FEDORA_CONTAINER_FQIN: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}"

    # Built along with the standard PR-based workflow in c/automation_images
    SKOPEO_CIDEV_CONTAINER_FQIN: "quay.io/libpod/skopeo_cidev:${IMAGE_SUFFIX}"


# Default timeout for each task
timeout_in: 45m


gcp_credentials: ENCRYPTED[52d9e807b531b37ab14e958cb5a72499460663f04c8d73e22ad608c027a31118420f1c80f0be0882fbdf96f49d8f9ac0]


validate_task:
    # The git-validation tool doesn't work well on branch or tag push,
    # under Cirrus-CI, due to challenges obtaining the starting commit ID.
    # Only do validation for PRs.
    only_if: &is_pr $CIRRUS_PR != ''
    container:
        image: '${SKOPEO_CIDEV_CONTAINER_FQIN}'
        cpu: 4
        memory: 8
    setup_script: |
        make tools
    test_script: |
        make validate-local
        make vendor && hack/tree_status.sh

doccheck_task:
    only_if: *is_pr
    depends_on:
        - validate
    container:
        image: "${FEDORA_CONTAINER_FQIN}"
        cpu: 4
        memory: 8
    env:
        BUILDTAGS: &withopengpg 'btrfs_noversion libdm_no_deferred_remove containers_image_openpgp'
    script: |
        # TODO: Can't use 'runner.sh setup' inside container. However,
        # removing the pre-installed package is the only necessary step
        # at the time of this comment.
        dnf remove -y skopeo # Guarantee non-interference
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" build
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" doccheck

osx_task:
    # Don't run for docs-only builds.
    # Also don't run on release-branches or their PRs,
    # since base container-image is not version-constrained.
    only_if: &not_docs_or_release_branch >-
        ($CIRRUS_BASE_BRANCH == $CIRRUS_DEFAULT_BRANCH ||
         $CIRRUS_BRANCH == $CIRRUS_DEFAULT_BRANCH ) &&
        $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
    depends_on:
        - validate
    persistent_worker: &mac_pw
        labels:
            os: darwin
            arch: arm64
            purpose: prod
        env:
            CIRRUS_WORKING_DIR: "$HOME/ci/task-${CIRRUS_TASK_ID}"
            # Prevent cache-pollution from one task to the next.
            GOPATH: "$CIRRUS_WORKING_DIR/.go"
            GOCACHE: "$CIRRUS_WORKING_DIR/.go/cache"
            GOENV: "$CIRRUS_WORKING_DIR/.go/support"
            GOSRC: "$HOME/ci/task-${CIRRUS_TASK_ID}"
            TMPDIR: "/private/tmp/ci"
    # This host is/was shared with potentially many other CI tasks.
    # The previous task may have been canceled or aborted.
    prep_script: &mac_cleanup "contrib/cirrus/mac_cleanup.sh"
    test_script:
        - export PATH=$GOPATH/bin:$PATH
        - go version
        - go env
        - make tools
        - make validate-local test-unit-local bin/skopeo
        - bin/skopeo -v
    # This host is/was shared with potentially many other CI tasks.
    # Ensure nothing is left running while waiting for the next task.
    always:
        task_cleanup_script: *mac_cleanup


cross_task:
    alias: cross
    only_if: >-
        $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
    depends_on:
        - validate
    gce_instance: &standardvm
        image_project: libpod-218412
        zone: "us-central1-f"
        cpu: 2
        memory: "4Gb"
        # Required to be 200gig, do not modify - has i/o performance impact
        # according to gcloud CLI tool warning messages.
        disk: 200
        image_name: ${FEDORA_CACHE_IMAGE_NAME}
    env:
        BUILDTAGS: *withopengpg
    setup_script: >-
        "${GOSRC}/${SCRIPT_BASE}/runner.sh" setup
    cross_script: >-
        "${GOSRC}/${SCRIPT_BASE}/runner.sh" cross


ostree-rs-ext_task:
    alias: proxy_ostree_ext
    only_if: *not_docs_or_release_branch
    # WARNING: This task potentially performs a container image
    # build (on change) with runtime package installs. Therefore,
    # its behavior can be unpredictable and potentially flake-prone.
    # In case of emergency, uncomment the next statement to bypass.
    #
    # skip: $CI == "true"
    #
    depends_on:
        - validate
    # Ref: https://cirrus-ci.org/guide/docker-builder-vm/#dockerfile-as-a-ci-environment
    container:
        # The runtime image will be rebuilt on change
        dockerfile: contrib/cirrus/ostree_ext.dockerfile
        docker_arguments: # required build-args
            BASE_FQIN: quay.io/coreos-assembler/fcos-buildroot:testing-devel
            CIRRUS_IMAGE_VERSION: 3
    env:
        EXT_REPO_NAME: ostree-rs-ext
        EXT_REPO_HOME: $CIRRUS_WORKING_DIR/../$EXT_REPO_NAME
        EXT_REPO: https://github.com/ostreedev/${EXT_REPO_NAME}.git
    skopeo_build_script:
        - dnf builddep -y skopeo
        - make
        - make install
    proxy_ostree_ext_build_script:
        - git clone --depth 1 $EXT_REPO $EXT_REPO_HOME
        - cd $EXT_REPO_HOME
        - cargo test --no-run
    proxy_ostree_ext_test_script:
        - cd $EXT_REPO_HOME
        - cargo test -- --nocapture --quiet


#####
##### NOTE: This task is substantially duplicated in the containers/image
##### repository's `.cirrus.yml`. Changes made here should be fully merged
##### prior to being manually duplicated and maintained in containers/image.
#####
test_skopeo_task:
    alias: test_skopeo
    # Don't test for [CI:DOCS], [CI:BUILD].
    only_if: >-
        $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' &&
        $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
    depends_on:
        - validate
    gce_instance:
        image_project: libpod-218412
        zone: "us-central1-f"
        cpu: 2
        memory: "4Gb"
        # Required to be 200gig, do not modify - has i/o performance impact
        # according to gcloud CLI tool warning messages.
        disk: 200
        image_name: ${FEDORA_CACHE_IMAGE_NAME}
    matrix:
        - name: "Skopeo Test" # N/B: Name ref. by hack/get_fqin.sh
          env:
              BUILDTAGS: 'btrfs_noversion libdm_no_deferred_remove'
        - name: "Skopeo Test w/ opengpg"
          env:
              BUILDTAGS: *withopengpg
    setup_script: >-
        "${GOSRC}/${SCRIPT_BASE}/runner.sh" setup
    vendor_script: >-
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" vendor
    build_script: >-
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" build
    unit_script: >-
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" unit
    integration_script: >-
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" integration
    system_script: >
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" system


# This task is critical. It updates the "last-used by" timestamp stored
# in metadata for all VM images. This mechanism functions in tandem with
# an out-of-band pruning operation to remove disused VM images.
meta_task:
    name: "VM img. keepalive"
    alias: meta
    container: &smallcontainer
        cpu: 2
        memory: 2
        image: quay.io/libpod/imgts:latest
    env:
        # Space-separated list of images used by this repository state
        IMGNAMES: |
            ${FEDORA_CACHE_IMAGE_NAME}
            build-push-${IMAGE_SUFFIX}
        BUILDID: "${CIRRUS_BUILD_ID}"
        REPOREF: "${CIRRUS_REPO_NAME}"
        GCPJSON: ENCRYPTED[6867b5a83e960e7c159a98fe6c8360064567a071c6f4b5e7d532283ecd870aa65c94ccd74bdaa9bf7aadac9d42e20a67]
        GCPNAME: ENCRYPTED[1cf558ae125e3c39ec401e443ad76452b25d790c45eb73d77c83eb059a0f7fd5085ef7e2f7e410b04ea6e83b0aab2eb1]
        GCPPROJECT: libpod-218412
    clone_script: &noop mkdir -p "$CIRRUS_WORKING_DIR"
    script: /usr/local/bin/entrypoint.sh


# Status aggregator for all tests. This task simply ensures a defined
# set of tasks all passed, and allows confirming that based on the status
# of this task.
success_task:
    name: "Total Success"
    alias: success
    # N/B: ALL tasks must be listed here, minus their '_task' suffix.
    depends_on:
        - validate
        - doccheck
        - osx
        - cross
        - proxy_ostree_ext
        - test_skopeo
        - meta
    container: *smallcontainer
    env:
        CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
        TEST_ENVIRON: container
    clone_script: *noop
    script: /bin/true
```
.fmf/version (new file; 1 line)

```
@@ -0,0 +1 @@
1
```
.packit.yaml (138 changed lines)

```yaml
@@ -2,116 +2,64 @@
# See the documentation for more information:
# https://packit.dev/docs/configuration/

# NOTE: The Packit copr_build tasks help to check if every commit builds on
# supported Fedora and CentOS Stream arches.
# They do not block the current Cirrus-based workflow.

downstream_package_name: skopeo
upstream_tag_template: v{version}

packages:
  skopeo-fedora:
    pkg_tool: fedpkg
    specfile_path: rpm/skopeo.spec
  skopeo-centos:
    pkg_tool: centpkg
    specfile_path: rpm/skopeo.spec
  skopeo-rhel:
    specfile_path: rpm/skopeo.spec
  skopeo-eln:
    specfile_path: rpm/skopeo.spec
    specfile_path: skopeo.spec

srpm_build_deps:
  - make
# Disable automatic merging for Copr builds (and subsequent Testing Farm)
merge_pr_in_ci: false

jobs:
  - job: copr_build
    trigger: pull_request
    packages: [skopeo-fedora]
    notifications: &copr_build_failure_notification
    notifications: &packit_failure_notification
      failure_comment:
        message: "Ephemeral COPR build failed. @containers/packit-build please check."
    targets:
      - fedora-development-x86_64
      - fedora-development-aarch64
      - fedora-latest-x86_64
      - fedora-latest-aarch64
      - fedora-latest-stable-x86_64
      - fedora-latest-stable-aarch64
      - fedora-40-x86_64
      - fedora-40-aarch64
        message: "Packit jobs failed. @containers/packit-build please check."
    enable_net: true
    targets:
      - epel-10-x86_64
      - epel-10-aarch64
    actions:
      post-upstream-clone: "curl --fail -O https://gitlab.com/redhat/centos-stream/rpms/skopeo/-/raw/c10s/skopeo.spec"

  - job: copr_build
    trigger: pull_request
    packages: [skopeo-eln]
    notifications: *copr_build_failure_notification
    targets:
      fedora-eln-x86_64:
        additional_repos:
          - "https://kojipkgs.fedoraproject.org/repos/eln-build/latest/x86_64/"
      fedora-eln-aarch64:
        additional_repos:
          - "https://kojipkgs.fedoraproject.org/repos/eln-build/latest/aarch64/"
    notifications: *packit_failure_notification
    enable_net: true
    targets:
      - epel-9-x86_64
      - epel-9-aarch64
    actions:
      post-upstream-clone: "curl --fail -O https://gitlab.com/redhat/centos-stream/rpms/skopeo/-/raw/c9s/skopeo.spec"

  - job: copr_build
  - job: tests
    trigger: pull_request
    packages: [skopeo-centos]
    notifications: *copr_build_failure_notification
    use_internal_tf: true
    notifications: *packit_failure_notification
    targets:
      - centos-stream-9-x86_64
      - centos-stream-9-aarch64
      - centos-stream-10-x86_64
      - centos-stream-10-aarch64
    enable_net: true
      epel-10-x86_64:
        distros: [RHEL-10-Nightly,RHEL-10.0-Nightly]
      epel-10-aarch64:
        distros: [RHEL-10-Nightly,RHEL-10.0-Nightly]
      epel-9-x86_64:
        distros: [RHEL-9-Nightly,RHEL-9.6.0-Nightly]
      epel-9-aarch64:
        distros: [RHEL-9-Nightly,RHEL-9.6.0-Nightly]
    tmt_plan: "/plans/system"
    identifier: "rpm"

  # Disabled until there is go 1.22 in epel-9
  # - job: copr_build
  #   trigger: pull_request
  #   packages: [skopeo-rhel]
  #   notifications: *copr_build_failure_notification
  #   targets:
  #     - epel-9-x86_64
  #     - epel-9-aarch64
  #   enable_net: true

  # Run on commit to main branch
  - job: copr_build
    trigger: commit
    packages: [skopeo-fedora]
    notifications:
      failure_comment:
        message: "podman-next COPR build failed. @containers/packit-build please check."
    branch: main
    owner: rhcontainerbot
    project: podman-next
    enable_net: true

  # Sync to Fedora
  - job: propose_downstream
    trigger: release
    packages: [skopeo-fedora]
    update_release: false
    dist_git_branches: &fedora_targets
      - fedora-all

  # Sync to CentOS Stream
  - job: propose_downstream
    trigger: release
    packages: [skopeo-centos]
    update_release: false
    dist_git_branches:
      - c10s

  # Fedora Koji build
  - job: koji_build
    trigger: commit
    packages: [skopeo-fedora]
    sidetag_group: podman-releases
    # Dependents are not rpm dependencies, but the package whose bodhi update
    # should include this package.
    # Ref: https://packit.dev/docs/fedora-releases-guide/releasing-multiple-packages
    dependents:
      - podman
    dist_git_branches: *fedora_targets
  - job: tests
    trigger: pull_request
    use_internal_tf: true
    notifications: *packit_failure_notification
    skip_build: true
    targets:
      # RHEL-N-Nightly can often have newer toolchain packages, breaking
      # vendoring. So, only test on N.Y here
      epel-10-x86_64:
        distros: [RHEL-10.0-Nightly]
      epel-9-x86_64:
        distros: [RHEL-9.6.0-Nightly]
    tmt_plan: "/plans/no-rpm"
    identifier: "no-rpm"
```
```markdown
@@ -1,3 +1,3 @@
## The skopeo Project Community Code of Conduct

The skopeo project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
The skopeo project, as part of Podman Container Tools, follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
```
Makefile (6 changed lines)

```makefile
@@ -27,7 +27,7 @@ GOARCH ?= $(shell go env GOARCH)
# N/B: This value is managed by Renovate, manual changes are
# possible, as long as they don't disturb the formatting
# (i.e. DO NOT ADD A 'v' prefix!)
GOLANGCI_LINT_VERSION := 1.61.0
GOLANGCI_LINT_VERSION := 1.63.4

ifeq ($(GOBIN),)
	GOBIN := $(GOPATH)/bin

@@ -70,7 +70,9 @@ export SKOPEO_CONTAINER_TESTS ?= $(if $(CI),1,0)
# This is a compromise, we either use a container for this or require
# the local user to have a compatible python3 development environment.
# Define it as a "resolve on use" variable to avoid calling out when possible
SKOPEO_CIDEV_CONTAINER_FQIN ?= $(shell hack/get_fqin.sh)
#SKOPEO_CIDEV_CONTAINER_FQIN ?= $(shell hack/get_fqin.sh)
# FIXME: hack/get_fqin.sh depends on cirrus.yml so we hardcode SKOPEO_CIDEV_CONTAINER_FQIN here
SKOPEO_CIDEV_CONTAINER_FQIN ?= "quay.io/libpod/skopeo_cidev:c20250131t121915z-f41f40d13"
CONTAINER_CMD ?= ${CONTAINER_RUNTIME} run --rm -i -e TESTFLAGS="$(TESTFLAGS)" -e CI=$(CI) -e SKOPEO_CONTAINER_TESTS=1
# if this session isn't interactive, then we don't want to allocate a
# TTY, which would fail, but if it is interactive, we do want to attach
```
cmd/skopeo/copy_test.go (new file; 18 lines)

```go
@@ -0,0 +1,18 @@
package main

import "testing"

func TestCopy(t *testing.T) {
	// Invalid command-line arguments
	for _, args := range [][]string{
		{},
		{"a1"},
		{"a1", "a2", "a3"},
	} {
		out, err := runSkopeo(append([]string{"--insecure-policy", "copy"}, args...)...)
		assertTestFailed(t, out, err, "Exactly two arguments expected")
	}

	// FIXME: Much more test coverage
	// Actual feature tests exist in integration and systemtest
}
```
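This test, like the `TestListTags` and `TestSync` additions below, leans on `runSkopeo` and `assertTestFailed` helpers defined elsewhere in package `main`. As a hedged sketch (all names below are stand-ins, not skopeo's actual helpers), the same table-driven pattern for any cobra-based CLI looks roughly like this:

```go
// cli_helpers_test.go (hypothetical); lives alongside the tests in a _test.go file.
package main

import (
	"bytes"
	"strings"
	"testing"

	"github.com/spf13/cobra"
)

// runCLI drives a cobra command tree in-process and captures its
// combined stdout/stderr output; a stand-in for skopeo's runSkopeo.
func runCLI(root *cobra.Command, args ...string) (string, error) {
	var buf bytes.Buffer
	root.SetOut(&buf)
	root.SetErr(&buf)
	root.SetArgs(args)
	err := root.Execute()
	return buf.String(), err
}

// assertFailed verifies the invocation failed and that the failure
// mentions the expected message; a stand-in for assertTestFailed.
func assertFailed(t *testing.T, out string, err error, expected string) {
	t.Helper()
	if err == nil {
		t.Fatalf("expected failure, got success; output: %q", out)
	}
	if !strings.Contains(err.Error(), expected) && !strings.Contains(out, expected) {
		t.Errorf("error %q / output %q does not mention %q", err, out, expected)
	}
}
```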
```go
@@ -54,3 +54,17 @@ func TestDockerRepositoryReferenceParserDrift(t *testing.T) {
		}
	}
}

func TestListTags(t *testing.T) {
	// Invalid command-line arguments
	for _, args := range [][]string{
		{},
		{"a1", "a2"},
	} {
		out, err := runSkopeo(append([]string{"list-tags"}, args...)...)
		assertTestFailed(t, out, err, "Exactly one non-option argument expected")
	}

	// FIXME: Much more test coverage
	// Actual feature tests exist in systemtest
}
```
```go
@@ -44,3 +44,18 @@ func TestTLSVerifyConfig(t *testing.T) {
	err := yaml.Unmarshal([]byte(`tls-verify: "not a valid bool"`), &config)
	assert.Error(t, err)
}

func TestSync(t *testing.T) {
	// Invalid command-line arguments
	for _, args := range [][]string{
		{},
		{"a1"},
		{"a1", "a2", "a3"},
	} {
		out, err := runSkopeo(append([]string{"sync"}, args...)...)
		assertTestFailed(t, out, err, "Exactly two arguments expected")
	}

	// FIXME: Much more test coverage
	// Actual feature tests exist in integration and systemtest
}
```
```go
@@ -7,6 +7,7 @@ import (
	"io"
	"os"
	"strings"
	"time"

	commonFlag "github.com/containers/common/pkg/flag"
	"github.com/containers/common/pkg/retry"

@@ -26,7 +27,7 @@
	"golang.org/x/term"
)

// errorShouldDisplayUsage is a subtype of error used by command handlers to indicate that cli.ShowSubcommandHelp should be called.
// errorShouldDisplayUsage is a subtype of error used by command handlers to indicate that the command’s help should be included.
type errorShouldDisplayUsage struct {
	error
}

@@ -62,7 +63,8 @@ func commandAction(handler func(args []string, stdout io.Writer) error) func(cmd
	err := handler(args, c.OutOrStdout())
	var shouldDisplayUsage errorShouldDisplayUsage
	if errors.As(err, &shouldDisplayUsage) {
		return c.Help()
		c.SetOut(c.ErrOrStderr()) // This mutates c, but we are failing anyway.
		_ = c.Help()              // Even if this failed, we prefer to report the original error
	}
	return err
}

@@ -183,6 +185,7 @@ func retryFlags() (pflag.FlagSet, *retry.Options) {
	opts := retry.Options{}
	fs := pflag.FlagSet{}
	fs.IntVar(&opts.MaxRetry, "retry-times", 0, "the number of times to possibly retry")
	fs.DurationVar(&opts.Delay, "retry-delay", 0*time.Second, "Fixed delay between retries. If not set, retry uses an exponential backoff delay.")
	return fs, &opts
}
```
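The `commandAction` hunk changes where usage help lands on failure: the old code returned `c.Help()`, which printed usage to stdout and dropped the handler's error, while the new code prints help to stderr as a side effect and still returns the original error. A minimal, self-contained sketch of the same pattern (plain cobra; this is illustrative, not skopeo's exact code):

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"os"

	"github.com/spf13/cobra"
)

// errorShouldDisplayUsage mirrors the wrapper type from the diff above.
type errorShouldDisplayUsage struct{ error }

// commandAction adapts a handler so that usage errors print help to
// stderr while the handler's original error is still returned.
func commandAction(handler func(args []string, stdout io.Writer) error) func(*cobra.Command, []string) error {
	return func(c *cobra.Command, args []string) error {
		err := handler(args, c.OutOrStdout())
		var shouldDisplayUsage errorShouldDisplayUsage
		if errors.As(err, &shouldDisplayUsage) {
			c.SetOut(c.ErrOrStderr()) // help belongs on stderr, since we are failing
			_ = c.Help()              // best effort; the original error is what matters
		}
		return err
	}
}

func main() {
	cmd := &cobra.Command{Use: "demo SOURCE DEST", SilenceUsage: true}
	cmd.RunE = commandAction(func(args []string, stdout io.Writer) error {
		if len(args) != 2 {
			return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
		}
		fmt.Fprintln(stdout, "ok")
		return nil
	})
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```

With this arrangement, running `demo` with a wrong argument count emits the help text on stderr and exits non-zero with the validation error, so scripts capturing stdout are unaffected.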
```markdown
@@ -204,7 +204,11 @@ Precompute digests to ensure layers are not uploaded that already exist on the d
**--retry-times**

The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.
The number of times to retry.

**--retry-delay**

Fixed delay between retries. If not set (or set to 0s), retry wait time will be exponentially increased based on the number of failed attempts.

**--src-username**

@@ -66,7 +66,11 @@ Bearer token for accessing the registry.
**--retry-times**

The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.
The number of times to retry.

**--retry-delay**

Fixed delay between retries. If not set (or set to 0s), retry wait time will be exponentially increased based on the number of failed attempts.

**--shared-blob-dir** _directory_

@@ -65,7 +65,11 @@ Registry token for accessing the registry.
**--retry-times**

The number of times to retry; retry wait time will be exponentially increased based on the number of failed attempts.
The number of times to retry.

**--retry-delay**

Fixed delay between retries. If not set (or set to 0s), retry wait time will be exponentially increased based on the number of failed attempts.

**--shared-blob-dir** _directory_

@@ -39,7 +39,11 @@ Bearer token for accessing the registry.
**--retry-times**

The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.
The number of times to retry.

**--retry-delay**

Fixed delay between retries. If not set (or set to 0s), retry wait time will be exponentially increased based on the number of failed attempts.

**--tls-verify**=_bool_

@@ -123,7 +123,13 @@ The passphrase to use when signing with `--sign-by` or `--sign-by-sigstore-privat
**--dest-registry-token** _Bearer token_ for accessing the destination registry.

**--retry-times** the number of times to retry, retry wait time will be exponentially increased based on the number of failed attempts.
**--retry-times**

The number of times to retry.

**--retry-delay**

Fixed delay between retries. If not set (or set to 0s), retry wait time will be exponentially increased based on the number of failed attempts.

**--keep-going**
If any errors occur during copying of images, those errors are logged and the process continues syncing the rest of the images and finally fails at the end.
```
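Each of these man pages encodes the same contract: `--retry-times` bounds the number of attempts, and a non-zero `--retry-delay` replaces the default exponential backoff with a fixed interval. A hedged sketch of how the two values drive containers/common's retry package (the `Options` fields come from the `retryFlags` hunk above; `retry.IfNecessary` is the library's entry point to the best of my knowledge, and the failing operation is invented for illustration):

```go
package main

import (
	"context"
	"fmt"
	"syscall"
	"time"

	"github.com/containers/common/pkg/retry"
)

func main() {
	opts := retry.Options{
		MaxRetry: 3,               // --retry-times
		Delay:    2 * time.Second, // --retry-delay; zero selects exponential backoff
	}

	attempt := 0
	err := retry.IfNecessary(context.Background(), func() error {
		attempt++
		fmt.Println("attempt", attempt)
		if attempt < 3 {
			// Only errors the library classifies as retryable are retried;
			// a connection-refused stand-in is used here.
			return fmt.Errorf("dial registry: %w", syscall.ECONNREFUSED)
		}
		return nil
	}, &opts)
	fmt.Println("final:", err)
}
```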
go.mod (82 changed lines)

```
@@ -1,27 +1,27 @@
module github.com/containers/skopeo

// Minimum required golang version
go 1.22.6
go 1.22.8

// Warning: Ensure the "go" and "toolchain" versions match exactly to prevent unwanted auto-updates

require (
	github.com/Masterminds/semver/v3 v3.3.0
	github.com/containers/common v0.61.0
	github.com/containers/image/v5 v5.33.0
	github.com/containers/ocicrypt v1.2.0
	github.com/containers/storage v1.56.0
	github.com/Masterminds/semver/v3 v3.3.1
	github.com/containers/common v0.62.0
	github.com/containers/image/v5 v5.34.0
	github.com/containers/ocicrypt v1.2.1
	github.com/containers/storage v1.57.1
	github.com/docker/distribution v2.8.3+incompatible
	github.com/moby/sys/capability v0.3.0
	github.com/moby/sys/capability v0.4.0
	github.com/opencontainers/go-digest v1.0.0
	github.com/opencontainers/image-spec v1.1.0
	github.com/opencontainers/image-tools v1.0.0-rc3
	github.com/sirupsen/logrus v1.9.3
	github.com/spf13/cobra v1.8.1
	github.com/spf13/pflag v1.0.5
	github.com/stretchr/testify v1.9.0
	golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c
	golang.org/x/term v0.26.0
	github.com/spf13/pflag v1.0.6
	github.com/stretchr/testify v1.10.0
	golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329
	golang.org/x/term v0.29.0
	gopkg.in/yaml.v3 v3.0.1
)

@@ -36,22 +36,22 @@ require (
	github.com/containerd/cgroups/v3 v3.0.3 // indirect
	github.com/containerd/errdefs v0.3.0 // indirect
	github.com/containerd/errdefs/pkg v0.3.0 // indirect
	github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
	github.com/containerd/typeurl/v2 v2.2.0 // indirect
	github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
	github.com/containerd/typeurl/v2 v2.2.3 // indirect
	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
	github.com/coreos/go-oidc/v3 v3.11.0 // indirect
	github.com/coreos/go-oidc/v3 v3.12.0 // indirect
	github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
	github.com/cyphar/filepath-securejoin v0.3.4 // indirect
	github.com/cyphar/filepath-securejoin v0.3.6 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/distribution/reference v0.6.0 // indirect
	github.com/docker/docker v27.3.1+incompatible // indirect
	github.com/docker/docker v27.5.1+incompatible // indirect
	github.com/docker/docker-credential-helpers v0.8.2 // indirect
	github.com/docker/go-connections v0.5.0 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/go-jose/go-jose/v3 v3.0.3 // indirect
	github.com/go-jose/go-jose/v4 v4.0.4 // indirect
	github.com/go-jose/go-jose/v3 v3.0.4 // indirect
	github.com/go-jose/go-jose/v4 v4.0.5 // indirect
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-openapi/analysis v0.23.0 // indirect

@@ -99,41 +99,41 @@ require (
	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
	github.com/proglottis/gpgme v0.1.3 // indirect
	github.com/proglottis/gpgme v0.1.4 // indirect
	github.com/rivo/uniseg v0.4.7 // indirect
	github.com/russross/blackfriday v2.0.0+incompatible // indirect
	github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
	github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect
	github.com/segmentio/ksuid v1.0.4 // indirect
	github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
	github.com/sigstore/fulcio v1.6.4 // indirect
	github.com/sigstore/rekor v1.3.6 // indirect
	github.com/sigstore/sigstore v1.8.9 // indirect
	github.com/sigstore/rekor v1.3.8 // indirect
	github.com/sigstore/sigstore v1.8.12 // indirect
	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
	github.com/smallstep/pkcs7 v0.1.1 // indirect
	github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
	github.com/sylabs/sif/v2 v2.19.1 // indirect
	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
	github.com/sylabs/sif/v2 v2.20.2 // indirect
	github.com/tchap/go-patricia/v2 v2.3.2 // indirect
	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
	github.com/ulikunitz/xz v0.5.12 // indirect
	github.com/vbatts/tar-split v0.11.6 // indirect
	github.com/vbauerster/mpb/v8 v8.8.3 // indirect
	github.com/vbatts/tar-split v0.11.7 // indirect
	github.com/vbauerster/mpb/v8 v8.9.1 // indirect
	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
	go.mongodb.org/mongo-driver v1.14.0 // indirect
	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
	go.opencensus.io v0.24.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
	go.opentelemetry.io/otel v1.28.0 // indirect
	go.opentelemetry.io/otel/metric v1.28.0 // indirect
	go.opentelemetry.io/otel/trace v1.28.0 // indirect
	golang.org/x/crypto v0.29.0 // indirect
	golang.org/x/mod v0.21.0 // indirect
	golang.org/x/net v0.30.0 // indirect
	golang.org/x/oauth2 v0.23.0 // indirect
	golang.org/x/sync v0.9.0 // indirect
	golang.org/x/sys v0.27.0 // indirect
	golang.org/x/text v0.20.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
	google.golang.org/grpc v1.67.0 // indirect
	google.golang.org/protobuf v1.35.1 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
	go.opentelemetry.io/otel v1.31.0 // indirect
	go.opentelemetry.io/otel/metric v1.31.0 // indirect
	go.opentelemetry.io/otel/trace v1.31.0 // indirect
	golang.org/x/crypto v0.32.0 // indirect
	golang.org/x/mod v0.22.0 // indirect
	golang.org/x/net v0.34.0 // indirect
	golang.org/x/oauth2 v0.25.0 // indirect
	golang.org/x/sync v0.10.0 // indirect
	golang.org/x/sys v0.30.0 // indirect
	golang.org/x/text v0.21.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect
	google.golang.org/grpc v1.69.4 // indirect
	google.golang.org/protobuf v1.36.2 // indirect
)
```
226
go.sum
226
go.sum
@@ -8,8 +8,8 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg6
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
|
||||
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
|
||||
github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
|
||||
github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4=
|
||||
github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg=
|
||||
@@ -37,39 +37,39 @@ github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151X
|
||||
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
|
||||
github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
|
||||
github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
|
||||
github.com/containers/common v0.61.0 h1:j/84PTqZIKKYy42OEJsZmjZ4g4Kq2ERuC3tqp2yWdh4=
|
||||
github.com/containers/common v0.61.0/go.mod h1:NGRISq2vTFPSbhNqj6MLwyes4tWSlCnqbJg7R77B8xc=
|
||||
github.com/containers/image/v5 v5.33.0 h1:6oPEFwTurf7pDTGw7TghqGs8K0+OvPtY/UyzU0B2DfE=
|
||||
github.com/containers/image/v5 v5.33.0/go.mod h1:T7HpASmvnp2H1u4cyckMvCzLuYgpD18dSmabSw0AcHk=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU=
|
||||
github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40=
|
||||
github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
|
||||
github.com/containers/common v0.62.0 h1:Sl9WE5h7Y/F3bejrMAA4teP1EcY9ygqJmW4iwSloZ10=
|
||||
github.com/containers/common v0.62.0/go.mod h1:Yec+z8mrSq4rydHofrnDCBqAcNA/BGrSg1kfFUL6F6s=
|
||||
github.com/containers/image/v5 v5.34.0 h1:HPqQaDUsox/3mC1pbOyLAIQEp0JhQqiUZ+6JiFIZLDI=
|
||||
github.com/containers/image/v5 v5.34.0/go.mod h1:/WnvUSEfdqC/ahMRd4YJDBLrpYWkGl018rB77iB3FDo=
|
||||
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
|
||||
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
|
||||
github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sirVuPM=
|
||||
github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U=
|
||||
github.com/containers/storage v1.56.0 h1:DZ9KSkj6M2tvj/4bBoaJu3QDHRl35BwsZ4kmLJS97ZI=
|
||||
github.com/containers/storage v1.56.0/go.mod h1:c6WKowcAlED/DkWGNuL9bvGYqIWCVy7isRMdCSKWNjk=
|
||||
github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI=
|
||||
github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
|
||||
github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM=
|
||||
github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ=
|
||||
github.com/containers/storage v1.57.1 h1:hKPoFsuBcB3qTzBxa4IFpZMRzUuL5Xhv/BE44W0XHx8=
|
||||
github.com/containers/storage v1.57.1/go.mod h1:i/Hb4lu7YgFr9G0K6BMjqW0BLJO1sFsnWQwj2UoWCUM=
|
||||
github.com/coreos/go-oidc/v3 v3.12.0 h1:sJk+8G2qq94rDI6ehZ71Bol3oUHy63qNYmkiSjrc/Jo=
|
||||
github.com/coreos/go-oidc/v3 v3.12.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM=
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
|
||||
github.com/cyphar/filepath-securejoin v0.3.4 h1:VBWugsJh2ZxJmLFSM06/0qzQyiQX2Qs0ViKrUAcqdZ8=
|
||||
github.com/cyphar/filepath-securejoin v0.3.4/go.mod h1:8s/MCNJREmFK0H02MF6Ihv1nakJe4L/w3WZLHNkvlYM=
|
||||
github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM=
|
||||
github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ=
|
||||
github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli v27.5.1+incompatible h1:JB9cieUT9YNiMITtIsguaN55PLOHhBSz3LKVc6cqWaY=
|
||||
github.com/docker/cli v27.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
|
||||
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
|
||||
github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8=
|
||||
github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
|
||||
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
@@ -89,10 +89,10 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
|
||||
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
|
||||
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
|
||||
github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E=
|
||||
github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc=
|
||||
github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY=
|
||||
github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
|
||||
github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
@@ -157,8 +157,8 @@ github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEva
|
||||
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
|
||||
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
|
||||
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
@@ -218,8 +218,8 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/sys/capability v0.3.0 h1:kEP+y6te0gEXIaeQhIi0s7vKs/w0RPoH1qPa6jROcVg=
|
||||
github.com/moby/sys/capability v0.3.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I=
|
||||
github.com/moby/sys/capability v0.4.0 h1:4D4mI6KlNtWMCM1Z/K0i7RV1FkX+DBDHKVJpCndZoHk=
|
||||
github.com/moby/sys/capability v0.4.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I=
|
||||
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
|
||||
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
|
||||
github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
|
||||
@@ -237,10 +237,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
|
||||
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
|
||||
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
|
||||
github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
|
||||
github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
|
||||
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
|
||||
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
||||
@@ -260,10 +260,10 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0=
|
||||
github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
|
||||
github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
|
||||
github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/proglottis/gpgme v0.1.4 h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M=
|
||||
github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM=
|
||||
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
|
||||
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
@@ -281,8 +281,8 @@ github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06Oy
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEWY=
|
||||
github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw=
|
||||
github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
|
||||
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
|
||||
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
|
||||
@@ -291,18 +291,21 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5I
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sigstore/fulcio v1.6.4 h1:d86obfxUAG3Y6CYwOx1pdwCZwKmROB6w6927pKOVIRY=
|
||||
github.com/sigstore/fulcio v1.6.4/go.mod h1:Y6bn3i3KGhXpaHsAtYP3Z4Np0+VzCo1fLv8Ci6mbPDs=
|
||||
github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8=
|
||||
github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc=
|
||||
github.com/sigstore/sigstore v1.8.9 h1:NiUZIVWywgYuVTxXmRoTT4O4QAGiTEKup4N1wdxFadk=
|
||||
github.com/sigstore/sigstore v1.8.9/go.mod h1:d9ZAbNDs8JJfxJrYmulaTazU3Pwr8uLL9+mii4BNR3w=
|
||||
github.com/sigstore/rekor v1.3.8 h1:B8kJI8mpSIXova4Jxa6vXdJyysRxFGsEsLKBDl0rRjA=
|
||||
github.com/sigstore/rekor v1.3.8/go.mod h1:/dHFYKSuxEygfDRnEwyJ+ZD6qoVYNXQdi1mJrKvKWsI=
|
||||
github.com/sigstore/sigstore v1.8.12 h1:S8xMVZbE2z9ZBuQUEG737pxdLjnbOIcFi5v9UFfkJFc=
|
||||
github.com/sigstore/sigstore v1.8.12/go.mod h1:+PYQAa8rfw0QdPpBcT+Gl3egKD9c+TUgAlF12H3Nmjo=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
|
||||
github.com/smallstep/pkcs7 v0.1.1 h1:x+rPdt2W088V9Vkjho4KtoggyktZJlMduZAtRHm68LU=
|
||||
github.com/smallstep/pkcs7 v0.1.1/go.mod h1:dL6j5AIz9GHjVEBTXtW+QliALcgM19RtXaTeyxI+AfA=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw=
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
@@ -313,21 +316,21 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/sylabs/sif/v2 v2.19.1 h1:1eeMmFc8elqJe60ZiWwXgL3gMheb0IP4GmNZ4q0IEA0=
|
||||
github.com/sylabs/sif/v2 v2.19.1/go.mod h1:U1SUhvl8X1JIxAylC0DYz1fa/Xba6EMZD1dGPGBH83E=
|
||||
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
|
||||
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/sylabs/sif/v2 v2.20.2 h1:HGEPzauCHhIosw5o6xmT3jczuKEuaFzSfdjAsH33vYw=
|
||||
github.com/sylabs/sif/v2 v2.20.2/go.mod h1:WyYryGRaR4Wp21SAymm5pK0p45qzZCSRiZMFvUZiuhc=
|
||||
github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM=
|
||||
github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
|
||||
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
|
||||
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
|
||||
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
||||
github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
|
||||
github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
||||
github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs=
|
||||
github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI=
|
||||
github.com/vbauerster/mpb/v8 v8.8.3 h1:dTOByGoqwaTJYPubhVz3lO5O6MK553XVgUo33LdnNsQ=
|
||||
github.com/vbauerster/mpb/v8 v8.8.3/go.mod h1:JfCCrtcMsJwP6ZwMn9e5LMnNyp3TVNpUWWkN+nd4EWk=
|
||||
github.com/vbatts/tar-split v0.11.7 h1:ixZ93pO/GmvaZw4Vq9OwmfZK/kc2zKdPfu0B+gYqs3U=
|
||||
github.com/vbatts/tar-split v0.11.7/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
|
||||
github.com/vbauerster/mpb/v8 v8.9.1 h1:LH5R3lXPfE2e3lIGxN7WNWv3Hl5nWO6LRi2B0L0ERHw=
|
||||
github.com/vbauerster/mpb/v8 v8.9.1/go.mod h1:4XMvznPh8nfe2NpnDo1QTPvW9MVkUhbG90mPWvmOzcQ=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
@@ -350,24 +353,24 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
|
||||
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
|
||||
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak=
|
||||
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -376,12 +379,15 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ=
golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY=
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8=
golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 h1:9kj3STMvgqy3YA4VQXBrN7925ICMxD5wzMRcgA30588=
golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -389,8 +395,11 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -404,11 +413,14 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -416,8 +428,11 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -429,26 +444,35 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU=
golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -459,8 +483,10 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -470,18 +496,18 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok=
google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8=
google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw=
google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -491,8 +517,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU=
google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

hack/tree_status.sh
@@ -1,7 +1,10 @@
#!/usr/bin/env bash
set -e

STATUS=$(git status --porcelain)
# TMT breaks this so we only check go.* and vendor
# https://github.com/teemtee/tmt/issues/3800
# STATUS=$(git status --porcelain)
STATUS=$(git status --porcelain go.* vendor)
if [[ -z $STATUS ]]
then
echo "tree is clean"

23
plans/no-rpm.fmf
Normal file
@@ -0,0 +1,23 @@
prepare:
  - name: dependencies
    how: install
    package: [ golang, go-md2man, gpgme-devel, podman-docker ]

discover:
  keep-git-metadata: true
  how: shell
  tests:
    - name: /validate
      test: >
        make tools &&
        make BUILDTAGS="exclude_graphdriver_btrfs" validate-local &&
        make validate-docs &&
        make vendor &&
        hack/tree_status.sh
    - name: /integration
      test: make BUILDTAGS="exclude_graphdriver_btrfs" test-integration
    - name: /unit
      test: make BUILDTAGS="exclude_graphdriver_btrfs" test-unit-local

execute:
  how: tmt

13
plans/system.fmf
Normal file
@@ -0,0 +1,13 @@
discover:
  how: fmf
  filter: 'tag:system'
execute:
  how: tmt
prepare:
  - how: shell
    script: |
      BATS_VERSION=1.12.0
      curl -L https://github.com/bats-core/bats-core/archive/refs/tags/v"$BATS_VERSION".tar.gz | tar -xz
      cd bats-core-"$BATS_VERSION"
      ./install.sh /usr
    order: 10

167
rpm/skopeo.spec
@@ -1,167 +0,0 @@
%global with_debug 1

%if 0%{?with_debug}
%global _find_debuginfo_dwz_opts %{nil}
%global _dwz_low_mem_die_limit 0
%else
%global debug_package %{nil}
%endif

# RHEL's default %%gobuild macro doesn't account for the BUILDTAGS variable, so we
# set it separately here and do not depend on RHEL's go-[s]rpm-macros package
# until that's fixed.
# c9s bz: https://bugzilla.redhat.com/show_bug.cgi?id=2227328
# c8s bz: https://bugzilla.redhat.com/show_bug.cgi?id=2227331
%if %{defined rhel}
%define gobuild(o:) go build -buildmode pie -compiler gc -tags="rpm_crashtraceback libtrust_openssl ${BUILDTAGS:-}" -ldflags "-linkmode=external -compressdwarf=false ${LDFLAGS:-} -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n') -extldflags '%__global_ldflags'" -a -v -x %{?**};
%endif

%global gomodulesmode GO111MODULE=on

# No btrfs on RHEL
%if %{defined fedora}
%define build_with_btrfs 1
%endif

# Only used in official koji builds
# Copr builds set a separate epoch for all environments
%if %{defined fedora}
%define conditional_epoch 1
%else
%define conditional_epoch 2
%endif

Name: skopeo
%if %{defined copr_username}
Epoch: 102
%else
Epoch: %{conditional_epoch}
%endif
# DO NOT TOUCH the Version string!
# The TRUE source of this specfile is:
# https://github.com/containers/skopeo/blob/main/rpm/skopeo.spec
# If that's what you're reading, Version must be 0, and will be updated by Packit for
# copr and koji builds.
# If you're reading this on dist-git, the version is automatically filled in by Packit.
Version: 0
# The `AND` needs to be uppercase in the License for SPDX compatibility
License: Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND ISC AND MIT AND MPL-2.0
Release: %autorelease
%if %{defined golang_arches_future}
ExclusiveArch: %{golang_arches_future}
%else
ExclusiveArch: aarch64 ppc64le s390x x86_64
%endif
Summary: Inspect container images and repositories on registries
URL: https://github.com/containers/%{name}
# Tarball fetched from upstream
Source0: %{url}/archive/v%{version}.tar.gz
BuildRequires: %{_bindir}/go-md2man
%if %{defined build_with_btrfs}
BuildRequires: btrfs-progs-devel
%endif
BuildRequires: git-core
BuildRequires: golang
%if !%{defined gobuild}
BuildRequires: go-rpm-macros
%endif
BuildRequires: gpgme-devel
BuildRequires: libassuan-devel
BuildRequires: ostree-devel
BuildRequires: glib2-devel
BuildRequires: make
BuildRequires: shadow-utils-subid-devel
Requires: containers-common >= 4:1-21

%description
Command line utility to inspect images and repositories directly on Docker
registries without the need to pull them

%package tests
Summary: Tests for %{name}

Requires: %{name} = %{epoch}:%{version}-%{release}
%if %{defined fedora}
Requires: bats
Requires: fakeroot
%endif
Requires: gnupg
Requires: jq
Requires: golang
Requires: podman
Requires: crun
Requires: httpd-tools
Requires: openssl
Requires: squashfs-tools

%description tests
%{summary}

This package contains system tests for %{name}

%prep
%autosetup -Sgit %{name}-%{version}
# The %%install stage should not rebuild anything but only install what's
# built in the %%build stage. So, remove any dependency on build targets.
sed -i 's/^install-binary: bin\/%{name}.*/install-binary:/' Makefile
sed -i 's/^completions: bin\/%{name}.*/completions:/' Makefile
sed -i 's/^install-docs: docs.*/install-docs:/' Makefile

%build
%set_build_flags
export CGO_CFLAGS=$CFLAGS

# These extra flags present in $CFLAGS have been skipped for now as they break the build
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-flto=auto//g')
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-Wp,D_GLIBCXX_ASSERTIONS//g')
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-specs=\/usr\/lib\/rpm\/redhat\/redhat-annobin-cc1//g')

%ifarch x86_64
export CGO_CFLAGS="$CGO_CFLAGS -m64 -mtune=generic -fcf-protection=full"
%endif

BASEBUILDTAGS="$(hack/libsubid_tag.sh)"
%if %{defined build_with_btrfs}
export BUILDTAGS="$BASEBUILDTAGS $(hack/btrfs_tag.sh) $(hack/btrfs_installed_tag.sh)"
%else
export BUILDTAGS="$BASEBUILDTAGS btrfs_noversion exclude_graphdriver_btrfs"
%endif

# unset LDFLAGS earlier set from set_build_flags
LDFLAGS=''

%gobuild -o bin/%{name} ./cmd/%{name}
%{__make} docs

%install
make \
DESTDIR=%{buildroot} \
PREFIX=%{_prefix} \
install-binary install-docs install-completions

# system tests
install -d -p %{buildroot}/%{_datadir}/%{name}/test/system
cp -pav systemtest/* %{buildroot}/%{_datadir}/%{name}/test/system/

#define license tag if not already defined
%{!?_licensedir:%global license %doc}

%files
%license LICENSE
%doc README.md
%{_bindir}/%{name}
%{_mandir}/man1/%{name}*
%dir %{_datadir}/bash-completion
%dir %{_datadir}/bash-completion/completions
%{_datadir}/bash-completion/completions/%{name}
%dir %{_datadir}/fish/vendor_completions.d
%{_datadir}/fish/vendor_completions.d/%{name}.fish
%dir %{_datadir}/zsh/site-functions
%{_datadir}/zsh/site-functions/_%{name}

%files tests
%license LICENSE vendor/modules.txt
%{_datadir}/%{name}/test

%changelog
%autochangelog

@@ -132,7 +132,7 @@ END_EXPECT
@test "inspect: image unknown" {
# non existing image
run_skopeo 2 inspect containers-storage:non-existing-tag
expect_output --substring "identifier is not an image" \
expect_output --substring "does not resolve to an image ID" \
"skopeo inspect containers-storage:010101010101"
}

@@ -10,7 +10,7 @@ SKOPEO_BINARY=${SKOPEO_BINARY:-${TEST_SOURCE_DIR}/../bin/skopeo}
SKOPEO_TIMEOUT=${SKOPEO_TIMEOUT:-300}

# Default image to run as a local registry
REGISTRY_FQIN=${SKOPEO_TEST_REGISTRY_FQIN:-quay.io/libpod/registry:2}
REGISTRY_FQIN=${SKOPEO_TEST_REGISTRY_FQIN:-quay.io/libpod/registry:2.8.2}

###############################################################################
# BEGIN setup/teardown

10
systemtest/tmt/main.fmf
Normal file
@@ -0,0 +1,10 @@
require:
  - skopeo-tests

environment:
  SKOPEO_BINARY: /usr/bin/skopeo

summary: System test
test: bash ./test.sh
duration: 60m
tag: [ system ]

12
systemtest/tmt/test.sh
Normal file
@@ -0,0 +1,12 @@
#!/usr/bin/env bash

set -exo pipefail

uname -r

rpm -q \
containers-common \
skopeo \
skopeo-tests \

bats /usr/share/skopeo/test/system

24
vendor/github.com/Masterminds/semver/v3/version.go
generated
vendored
@@ -39,9 +39,11 @@ var (
)

// semVerRegex is the regular expression used to parse a semantic version.
const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
// This is not the official regex from the semver spec. It has been modified to allow for loose handling
// where versions like 2.1 are detected.
const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` +
`(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` +
`(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?`

// Version represents a single semantic version.
type Version struct {
@@ -146,8 +148,8 @@ func NewVersion(v string) (*Version, error) {
}

sv := &Version{
metadata: m[8],
pre: m[5],
metadata: m[5],
pre: m[4],
original: v,
}

@@ -158,7 +160,7 @@ func NewVersion(v string) (*Version, error) {
}

if m[2] != "" {
sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64)
sv.minor, err = strconv.ParseUint(m[2], 10, 64)
if err != nil {
return nil, fmt.Errorf("Error parsing version segment: %s", err)
}
@@ -167,7 +169,7 @@ func NewVersion(v string) (*Version, error) {
}

if m[3] != "" {
sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64)
sv.patch, err = strconv.ParseUint(m[3], 10, 64)
if err != nil {
return nil, fmt.Errorf("Error parsing version segment: %s", err)
}
@@ -612,7 +614,9 @@ func containsOnly(s string, comp string) bool {
func validatePrerelease(p string) error {
eparts := strings.Split(p, ".")
for _, p := range eparts {
if containsOnly(p, num) {
if p == "" {
return ErrInvalidMetadata
} else if containsOnly(p, num) {
if len(p) > 1 && p[0] == '0' {
return ErrSegmentStartsZero
}
@@ -631,7 +635,9 @@ func validatePrerelease(p string) error {
func validateMetadata(m string) error {
eparts := strings.Split(m, ".")
for _, p := range eparts {
if !containsOnly(p, allowed) {
if p == "" {
return ErrInvalidMetadata
} else if !containsOnly(p, allowed) {
return ErrInvalidMetadata
}
}

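The hunks above tighten the vendored regex to strict SemVer capture groups (so the match indices shift: pre-release moves from m[5] to m[4], metadata from m[8] to m[5]) while still tolerating partial versions. A minimal sketch of what that "loose handling" means for callers, using the library's public NewVersion API; this example is illustrative and not part of the diff:

    package main

    import (
        "fmt"

        "github.com/Masterminds/semver/v3"
    )

    func main() {
        // Loose handling: "2.1" is accepted; the missing patch segment defaults to 0.
        v, err := semver.NewVersion("2.1")
        if err != nil {
            panic(err)
        }
        fmt.Println(v.Major(), v.Minor(), v.Patch()) // 2 1 0

        // A full SemVer string with pre-release and build metadata also parses.
        v2, _ := semver.NewVersion("1.2.3-beta.1+build.5")
        fmt.Println(v2.Prerelease(), v2.Metadata()) // beta.1 build.5
    }
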
15
vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
generated
vendored
@@ -26,12 +26,13 @@ import (
"archive/tar"
"bytes"
"compress/gzip"
"crypto/rand"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"io"
"math/rand"
"math/big"
"os"
"path/filepath"
"reflect"
@@ -45,10 +46,6 @@ import (
digest "github.com/opencontainers/go-digest"
)

func init() {
rand.Seed(time.Now().UnixNano())
}

// TestingController is Compression with some helper methods necessary for testing.
type TestingController interface {
Compression
@@ -920,9 +917,11 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
}
if sampleEntry == nil {
t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target")
return
}
if targetEntry == nil {
t.Fatalf("rewrite target not found")
return
}
targetEntry.Offset = sampleEntry.Offset
},
@@ -2291,7 +2290,11 @@ var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX
func randomContents(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = runes[rand.Intn(len(runes))]
bi, err := rand.Int(rand.Reader, big.NewInt(int64(len(runes))))
if err != nil {
panic(err)
}
b[i] = runes[int(bi.Int64())]
}
return string(b)
}

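The hunk above swaps math/rand (plus the deprecated rand.Seed) for crypto/rand with math/big when picking random runes. A self-contained sketch of the same pattern, assuming nothing beyond the standard library; the helper name mirrors the vendored one for readability:

    package main

    import (
        "crypto/rand"
        "fmt"
        "math/big"
    )

    var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyz")

    // randomContents follows the diff's approach: crypto/rand.Int returns a
    // uniformly distributed value in [0, max), and needs no seeding.
    func randomContents(n int) string {
        b := make([]rune, n)
        for i := range b {
            bi, err := rand.Int(rand.Reader, big.NewInt(int64(len(runes))))
            if err != nil {
                panic(err) // rand.Reader failing indicates a broken system entropy source
            }
            b[i] = runes[int(bi.Int64())]
        }
        return string(b)
    }

    func main() {
        fmt.Println(randomContents(16))
    }
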
6
vendor/github.com/containerd/typeurl/v2/README.md
generated
vendored
@@ -18,3 +18,9 @@ As a containerd sub-project, you will find the:
* and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)

information in our [`containerd/project`](https://github.com/containerd/project) repository.

## Optional

By default, support for gogoproto is available along side the standard Google
protobuf types.
You can choose to leave gogo support out by using the `!no_gogo` build tag.

89
vendor/github.com/containerd/typeurl/v2/types.go
generated
vendored
@@ -24,7 +24,6 @@ import (
"reflect"
"sync"

gogoproto "github.com/gogo/protobuf/proto"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/types/known/anypb"
@@ -33,8 +32,16 @@ import (
var (
mu sync.RWMutex
registry = make(map[reflect.Type]string)
handlers []handler
)

type handler interface {
Marshaller(interface{}) func() ([]byte, error)
Unmarshaller(interface{}) func([]byte) error
TypeURL(interface{}) string
GetType(url string) (reflect.Type, bool)
}

// Definitions of common error types used throughout typeurl.
//
// These error types are used with errors.Wrap and errors.Wrapf to add context
@@ -112,9 +119,12 @@ func TypeURL(v interface{}) (string, error) {
switch t := v.(type) {
case proto.Message:
return string(t.ProtoReflect().Descriptor().FullName()), nil
case gogoproto.Message:
return gogoproto.MessageName(t), nil
default:
for _, h := range handlers {
if u := h.TypeURL(v); u != "" {
return u, nil
}
}
return "", fmt.Errorf("type %s: %w", reflect.TypeOf(v), ErrNotFound)
}
}
@@ -149,12 +159,19 @@ func MarshalAny(v interface{}) (Any, error) {
marshal = func(v interface{}) ([]byte, error) {
return proto.Marshal(t)
}
case gogoproto.Message:
marshal = func(v interface{}) ([]byte, error) {
return gogoproto.Marshal(t)
}
default:
marshal = json.Marshal
for _, h := range handlers {
if m := h.Marshaller(v); m != nil {
marshal = func(v interface{}) ([]byte, error) {
return m()
}
break
}
}

if marshal == nil {
marshal = json.Marshal
}
}

url, err := TypeURL(v)
@@ -223,13 +240,13 @@ func MarshalAnyToProto(from interface{}) (*anypb.Any, error) {
}

func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) {
t, err := getTypeByUrl(typeURL)
t, isProto, err := getTypeByUrl(typeURL)
if err != nil {
return nil, err
}

if v == nil {
v = reflect.New(t.t).Interface()
v = reflect.New(t).Interface()
} else {
// Validate interface type provided by client
vURL, err := TypeURL(v)
@@ -241,51 +258,45 @@ func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error)
}
}

if t.isProto {
switch t := v.(type) {
case proto.Message:
err = proto.Unmarshal(value, t)
case gogoproto.Message:
err = gogoproto.Unmarshal(value, t)
if isProto {
pm, ok := v.(proto.Message)
if ok {
return v, proto.Unmarshal(value, pm)
}

for _, h := range handlers {
if unmarshal := h.Unmarshaller(v); unmarshal != nil {
return v, unmarshal(value)
}
}
} else {
err = json.Unmarshal(value, v)
}

return v, err
// fallback to json unmarshaller
return v, json.Unmarshal(value, v)
}

type urlType struct {
t reflect.Type
isProto bool
}

func getTypeByUrl(url string) (urlType, error) {
func getTypeByUrl(url string) (_ reflect.Type, isProto bool, _ error) {
mu.RLock()
for t, u := range registry {
if u == url {
mu.RUnlock()
return urlType{
t: t,
}, nil
return t, false, nil
}
}
mu.RUnlock()
// fallback to proto registry
t := gogoproto.MessageType(url)
if t != nil {
return urlType{
// get the underlying Elem because proto returns a pointer to the type
t: t.Elem(),
isProto: true,
}, nil
}
mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
if err != nil {
return urlType{}, fmt.Errorf("type with url %s: %w", url, ErrNotFound)
if errors.Is(err, protoregistry.NotFound) {
for _, h := range handlers {
if t, isProto := h.GetType(url); t != nil {
return t, isProto, nil
}
}
}
return nil, false, fmt.Errorf("type with url %s: %w", url, ErrNotFound)
}
empty := mt.New().Interface()
return urlType{t: reflect.TypeOf(empty).Elem(), isProto: true}, nil
return reflect.TypeOf(empty).Elem(), true, nil
}

func tryDereference(v interface{}) reflect.Type {

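The refactor above routes all gogo/protobuf handling through the new handler interface, so the core package no longer imports gogoproto directly (it gets plugged in from types_gogo.go below). From a caller's perspective the public API is unchanged; a minimal sketch using typeurl's real Register/MarshalAny/UnmarshalAny functions, with a made-up type and URL for illustration:

    package main

    import (
        "fmt"

        typeurl "github.com/containerd/typeurl/v2"
    )

    // Config is not a proto message, so it takes the json.Marshal fallback
    // path shown in the diff above.
    type Config struct {
        Name string `json:"name"`
    }

    func main() {
        typeurl.Register(&Config{}, "example.com/Config") // hypothetical type URL
        anyVal, err := typeurl.MarshalAny(&Config{Name: "demo"})
        if err != nil {
            panic(err)
        }
        out, err := typeurl.UnmarshalAny(anyVal)
        if err != nil {
            panic(err)
        }
        fmt.Println(out.(*Config).Name) // demo
    }
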
68
vendor/github.com/containerd/typeurl/v2/types_gogo.go
generated
vendored
Normal file
@@ -0,0 +1,68 @@
//go:build !no_gogo

/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package typeurl

import (
"reflect"

gogoproto "github.com/gogo/protobuf/proto"
)

func init() {
handlers = append(handlers, gogoHandler{})
}

type gogoHandler struct{}

func (gogoHandler) Marshaller(v interface{}) func() ([]byte, error) {
pm, ok := v.(gogoproto.Message)
if !ok {
return nil
}
return func() ([]byte, error) {
return gogoproto.Marshal(pm)
}
}

func (gogoHandler) Unmarshaller(v interface{}) func([]byte) error {
pm, ok := v.(gogoproto.Message)
if !ok {
return nil
}

return func(dt []byte) error {
return gogoproto.Unmarshal(dt, pm)
}
}

func (gogoHandler) TypeURL(v interface{}) string {
pm, ok := v.(gogoproto.Message)
if !ok {
return ""
}
return gogoproto.MessageName(pm)
}

func (gogoHandler) GetType(url string) (reflect.Type, bool) {
t := gogoproto.MessageType(url)
if t == nil {
return nil, false
}
return t.Elem(), true
}

24
vendor/github.com/containers/image/v5/copy/single.go
generated
vendored
@@ -109,7 +109,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
}
}

if err := checkImageDestinationForCurrentRuntime(ctx, c.options.DestinationCtx, src, c.dest); err != nil {
if err := prepareImageConfigForDest(ctx, c.options.DestinationCtx, src, c.dest); err != nil {
return copySingleImageResult{}, err
}

@@ -316,12 +316,15 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
return res, nil
}

// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary.
func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
// prepareImageConfigForDest enforces dest.MustMatchRuntimeOS and handles dest.NoteOriginalOCIConfig, if necessary.
func prepareImageConfigForDest(ctx context.Context, sys *types.SystemContext, src types.Image, dest private.ImageDestination) error {
ociConfig, configErr := src.OCIConfig(ctx)
// Do not fail on configErr here, this might be an artifact
// and maybe nothing needs this to be a container image and to process the config.

if dest.MustMatchRuntimeOS() {
c, err := src.OCIConfig(ctx)
if err != nil {
return fmt.Errorf("parsing image configuration: %w", err)
if configErr != nil {
return fmt.Errorf("parsing image configuration: %w", configErr)
}
wantedPlatforms := platform.WantedPlatforms(sys)

@@ -331,7 +334,7 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
// For a transitional period, this might trigger warnings because the Variant
// field was added to OCI config only recently. If this turns out to be too noisy,
// revert this check to only look for (OS, Architecture).
if platform.MatchesPlatform(c.Platform, wantedPlatform) {
if platform.MatchesPlatform(ociConfig.Platform, wantedPlatform) {
match = true
break
}
@@ -339,9 +342,14 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
}
if !match {
logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q",
c.OS, c.Architecture, c.Variant, strings.Join(options.list, ", "))
ociConfig.OS, ociConfig.Architecture, ociConfig.Variant, strings.Join(options.list, ", "))
}
}

if err := dest.NoteOriginalOCIConfig(ociConfig, configErr); err != nil {
return err
}

return nil
}

1
vendor/github.com/containers/image/v5/directory/directory_dest.go
generated
vendored
@@ -29,6 +29,7 @@ var ErrNotContainerImageDir = errors.New("not a containers image directory, don'
type dirImageDestination struct {
impl.Compat
impl.PropertyMethodsInitialize
stubs.IgnoresOriginalOCIConfig
stubs.NoPutBlobPartialInitialize
stubs.AlwaysSupportsSignatures

12
vendor/github.com/containers/image/v5/docker/daemon/client.go
generated
vendored
@@ -3,6 +3,7 @@ package daemon
import (
"net/http"
"path/filepath"
"time"

"github.com/containers/image/v5/types"
dockerclient "github.com/docker/docker/client"
@@ -47,6 +48,7 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
}
switch serverURL.Scheme {
case "unix": // Nothing
case "npipe": // Nothing
case "http":
hc := httpConfig()
opts = append(opts, dockerclient.WithHTTPClient(hc))
@@ -82,6 +84,11 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: tlsc,
// In general we want to follow docker/daemon/client.defaultHTTPClient , as long as it doesn’t affect compatibility.
// These idle connection limits really only apply to long-running clients, which is not our case here;
// we include the same values purely for symmetry.
MaxIdleConns: 6,
IdleConnTimeout: 30 * time.Second,
},
CheckRedirect: dockerclient.CheckRedirect,
}, nil
@@ -92,6 +99,11 @@ func httpConfig() *http.Client {
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: nil,
// In general we want to follow docker/daemon/client.defaultHTTPClient , as long as it doesn’t affect compatibility.
// These idle connection limits really only apply to long-running clients, which is not our case here;
// we include the same values purely for symmetry.
MaxIdleConns: 6,
IdleConnTimeout: 30 * time.Second,
},
CheckRedirect: dockerclient.CheckRedirect,
}

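Both hunks above add the same idle-connection limits to the daemon client's transports. A standalone sketch of such a client using only the standard library; the endpoint URL is a hypothetical stand-in, not taken from the diff:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        // Mirrors the transport settings added above: proxy taken from the
        // environment, plus conservative limits on pooled idle connections.
        client := &http.Client{
            Transport: &http.Transport{
                Proxy:           http.ProxyFromEnvironment,
                MaxIdleConns:    6,
                IdleConnTimeout: 30 * time.Second,
            },
        }
        resp, err := client.Get("http://localhost:2375/_ping") // hypothetical daemon endpoint
        if err != nil {
            fmt.Println("request failed:", err)
            return
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status)
    }
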
8
vendor/github.com/containers/image/v5/docker/distribution_error.go
generated
vendored
@@ -24,7 +24,6 @@ import (
"slices"

"github.com/docker/distribution/registry/api/errcode"
dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
)

// errNoErrorsInBody is returned when an HTTP response body parses to an empty
@@ -114,10 +113,11 @@ func mergeErrors(err1, err2 error) error {
// UnexpectedHTTPStatusError returned for response code outside of expected
// range.
func handleErrorResponse(resp *http.Response) error {
if resp.StatusCode >= 400 && resp.StatusCode < 500 {
switch {
case resp.StatusCode == http.StatusUnauthorized:
// Check for OAuth errors within the `WWW-Authenticate` header first
// See https://tools.ietf.org/html/rfc6750#section-3
for _, c := range dockerChallenge.ResponseChallenges(resp) {
for _, c := range parseAuthHeader(resp.Header) {
if c.Scheme == "bearer" {
var err errcode.Error
// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
@@ -138,6 +138,8 @@ func handleErrorResponse(resp *http.Response) error {
return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
}
}
fallthrough
case resp.StatusCode >= 400 && resp.StatusCode < 500:
err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)

13
vendor/github.com/containers/image/v5/docker/docker_client.go
generated
vendored
@@ -1056,6 +1056,15 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty
func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) {
// Note that this copies all kinds of attachments: attestations, and whatever else is there,
// not just signatures. We leave the signature consumers to decide based on the MIME type.

if err := desc.Digest.Validate(); err != nil { // .Algorithm() might panic without this check
return nil, fmt.Errorf("invalid digest %q: %w", desc.Digest.String(), err)
}
digestAlgorithm := desc.Digest.Algorithm()
if !digestAlgorithm.Available() {
return nil, fmt.Errorf("invalid digest %q: unsupported digest algorithm %q", desc.Digest.String(), digestAlgorithm.String())
}

reader, _, err := c.getBlob(ctx, ref, manifest.BlobInfoFromOCI1Descriptor(desc), cache)
if err != nil {
return nil, err
@@ -1065,6 +1074,10 @@ func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerR
if err != nil {
return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err)
}
actualDigest := digestAlgorithm.FromBytes(payload)
if actualDigest != desc.Digest {
return nil, fmt.Errorf("digest mismatch, expected %q, got %q", desc.Digest.String(), actualDigest.String())
}
return payload, nil
}

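The hunks above validate the descriptor's digest before using it and then re-verify the downloaded payload against it. A minimal sketch of that verification pattern with the opencontainers/go-digest API the diff uses; the function name here is illustrative:

    package main

    import (
        _ "crypto/sha256" // registers the sha256 implementation for go-digest

        "fmt"

        "github.com/opencontainers/go-digest"
    )

    // verifyPayload follows the same order as the diff: validate the expected
    // digest first (Algorithm() can panic on malformed digests), check the
    // algorithm is linked into the binary, then compare against actual bytes.
    func verifyPayload(expected digest.Digest, payload []byte) error {
        if err := expected.Validate(); err != nil {
            return fmt.Errorf("invalid digest %q: %w", expected, err)
        }
        alg := expected.Algorithm()
        if !alg.Available() {
            return fmt.Errorf("unsupported digest algorithm %q", alg)
        }
        if actual := alg.FromBytes(payload); actual != expected {
            return fmt.Errorf("digest mismatch, expected %q, got %q", expected, actual)
        }
        return nil
    }

    func main() {
        payload := []byte("hello")
        fmt.Println(verifyPayload(digest.FromBytes(payload), payload)) // <nil>
    }
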
1
vendor/github.com/containers/image/v5/docker/docker_image_dest.go
generated
vendored
@@ -41,6 +41,7 @@ import (
type dockerImageDestination struct {
impl.Compat
impl.PropertyMethodsInitialize
stubs.IgnoresOriginalOCIConfig
stubs.NoPutBlobPartialInitialize

ref dockerReference

4
vendor/github.com/containers/image/v5/docker/docker_image_src.go
generated
vendored
@@ -340,6 +340,10 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
}
return
}
if parts >= len(chunks) {
errs <- errors.New("too many parts returned by the server")
break
}
s := signalCloseReader{
closed: make(chan struct{}),
stream: p,

1
vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
generated
vendored
@@ -24,6 +24,7 @@ import (
type Destination struct {
impl.Compat
impl.PropertyMethodsInitialize
stubs.IgnoresOriginalOCIConfig
stubs.NoPutBlobPartialInitialize
stubs.NoSignaturesInitialize

6
vendor/github.com/containers/image/v5/docker/registries_d.go
generated
vendored
@@ -3,6 +3,7 @@ package docker
import (
"errors"
"fmt"
"io/fs"
"net/url"
"os"
"path"
@@ -129,6 +130,11 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
configPath := filepath.Join(dirPath, configName)
configBytes, err := os.ReadFile(configPath)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
// file must have been removed between the directory listing
// and the open call, ignore that as it is a expected race
continue
}
return nil, err
}

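The hunk above tolerates files that vanish between the directory listing and the subsequent open. A self-contained sketch of the same errors.Is(err, fs.ErrNotExist) pattern, standard library only; the helper name is illustrative:

    package main

    import (
        "errors"
        "fmt"
        "io/fs"
        "os"
        "path/filepath"
    )

    // readAll reads every file in dir, skipping entries removed between the
    // ReadDir call and the ReadFile call -- the race the hunk above tolerates.
    func readAll(dir string) (map[string][]byte, error) {
        entries, err := os.ReadDir(dir)
        if err != nil {
            return nil, err
        }
        out := map[string][]byte{}
        for _, e := range entries {
            data, err := os.ReadFile(filepath.Join(dir, e.Name()))
            if err != nil {
                if errors.Is(err, fs.ErrNotExist) {
                    continue // removed concurrently; an expected race
                }
                return nil, err
            }
            out[e.Name()] = data
        }
        return out, nil
    }

    func main() {
        m, err := readAll(os.TempDir())
        fmt.Println(len(m), err)
    }
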
16
vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
package stubs

import (
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// IgnoresOriginalOCIConfig implements NoteOriginalOCIConfig() that does nothing.
type IgnoresOriginalOCIConfig struct{}

// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
// The destination can use it in its TryReusingBlob/PutBlob implementations
// (otherwise it only obtains the final config after all layers are written).
func (stub IgnoresOriginalOCIConfig) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
return nil
}

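This stub exists so that the many destinations touched in this compare can adopt the new NoteOriginalOCIConfig interface method by simply embedding a no-op. A small sketch of that embedding pattern with made-up types, not the containers/image API:

    package main

    import "fmt"

    // Destination has a method most implementations want to ignore,
    // analogous to NoteOriginalOCIConfig in the diff above.
    type Destination interface {
        NoteConfig(cfg string) error
        Put(blob []byte) error
    }

    // IgnoresConfig plays the role of stubs.IgnoresOriginalOCIConfig:
    // a no-op implementation that destinations embed to satisfy the interface.
    type IgnoresConfig struct{}

    func (IgnoresConfig) NoteConfig(string) error { return nil }

    // dirDestination embeds the stub, so it only has to implement Put.
    type dirDestination struct {
        IgnoresConfig
    }

    func (dirDestination) Put(blob []byte) error {
        fmt.Println("stored", len(blob), "bytes")
        return nil
    }

    func main() {
        var d Destination = dirDestination{}
        _ = d.NoteConfig("ignored") // provided by the embedded stub
        _ = d.Put([]byte("layer"))
    }
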
1
vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
generated
vendored
@@ -14,6 +14,7 @@ import (
// wrapped provides the private.ImageDestination operations
// for a destination that only implements types.ImageDestination
type wrapped struct {
stubs.IgnoresOriginalOCIConfig
stubs.NoPutBlobPartialInitialize

types.ImageDestination

32
vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go
generated
vendored
@@ -74,20 +74,20 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat

// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
editInstances := []ListEdit{}
for i, instance := range updates {
editInstances = append(editInstances, ListEdit{
UpdateOldDigest: index.Manifests[i].Digest,
UpdateOldDigest: list.Manifests[i].Digest,
UpdateDigest: instance.Digest,
UpdateSize: instance.Size,
UpdateMediaType: instance.MediaType,
ListOperation: ListOpUpdate})
}
return index.editInstances(editInstances)
return list.editInstances(editInstances)
}

func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
func (list *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
addedEntries := []Schema2ManifestDescriptor{}
for i, editInstance := range editInstances {
switch editInstance.ListOperation {
@@ -98,21 +98,21 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
if err := editInstance.UpdateDigest.Validate(); err != nil {
return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err)
}
targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool {
targetIndex := slices.IndexFunc(list.Manifests, func(m Schema2ManifestDescriptor) bool {
return m.Digest == editInstance.UpdateOldDigest
})
if targetIndex == -1 {
return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest)
}
index.Manifests[targetIndex].Digest = editInstance.UpdateDigest
list.Manifests[targetIndex].Digest = editInstance.UpdateDigest
if editInstance.UpdateSize < 0 {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize)
}
index.Manifests[targetIndex].Size = editInstance.UpdateSize
list.Manifests[targetIndex].Size = editInstance.UpdateSize
if editInstance.UpdateMediaType == "" {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType)
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), list.Manifests[i].MediaType)
}
index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
list.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
case ListOpAdd:
if editInstance.AddPlatform == nil {
// Should we create a struct with empty fields instead?
@@ -135,13 +135,13 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
if len(addedEntries) != 0 {
// slices.Clone() here to ensure a private backing array;
// an external caller could have manually created Schema2ListPublic with a slice with extra capacity.
index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
list.Manifests = append(slices.Clone(list.Manifests), addedEntries...)
}
return nil
}

func (index *Schema2List) EditInstances(editInstances []ListEdit) error {
return index.editInstances(editInstances)
func (list *Schema2List) EditInstances(editInstances []ListEdit) error {
return list.editInstances(editInstances)
}

func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
@@ -280,12 +280,12 @@ func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List {
return &Schema2List{*public}
}

func (index *Schema2List) CloneInternal() List {
return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic))
func (list *Schema2List) CloneInternal() List {
return schema2ListFromPublic(Schema2ListPublicClone(&list.Schema2ListPublic))
}

func (index *Schema2List) Clone() ListPublic {
return index.CloneInternal()
func (list *Schema2List) Clone() ListPublic {
return list.CloneInternal()
}

// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled

7
vendor/github.com/containers/image/v5/internal/private/private.go
generated
vendored
@@ -10,6 +10,7 @@ import (
compression "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// ImageSourceInternalOnly is the part of private.ImageSource that is not
@@ -41,6 +42,12 @@ type ImageDestinationInternalOnly interface {
// FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures
// on unsupported formats.

// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
// The destination can use it in its TryReusingBlob/PutBlob implementations
// (otherwise it only obtains the final config after all layers are written).
NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error

// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.

22
vendor/github.com/containers/image/v5/internal/reflink/reflink_linux.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
//go:build linux

package reflink

import (
"io"
"os"

"golang.org/x/sys/unix"
)

// LinkOrCopy attempts to reflink the source to the destination fd.
// If reflinking fails or is unsupported, it falls back to io.Copy().
func LinkOrCopy(src, dst *os.File) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, dst.Fd(), unix.FICLONE, src.Fd())
if errno == 0 {
return nil
}

_, err := io.Copy(dst, src)
return err
}

15 vendor/github.com/containers/image/v5/internal/reflink/reflink_unsupported.go generated vendored Normal file
@@ -0,0 +1,15 @@
+//go:build !linux
+
+package reflink
+
+import (
+	"io"
+	"os"
+)
+
+// LinkOrCopy attempts to reflink the source to the destination fd.
+// If reflinking fails or is unsupported, it falls back to io.Copy().
+func LinkOrCopy(src, dst *os.File) error {
+	_, err := io.Copy(dst, src)
+	return err
+}
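
Both build variants expose the same LinkOrCopy signature, so callers stay platform-agnostic. A minimal usage sketch, not part of the diff: reflink is an internal c/image package, so this only compiles inside that module, and the file paths are illustrative.

package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/internal/reflink" // internal: only importable within c/image itself
)

// copyBlob clones srcPath into dstPath: FICLONE on supporting Linux
// filesystems (e.g. btrfs, XFS), a plain byte copy everywhere else.
func copyBlob(srcPath, dstPath string) error {
	src, err := os.Open(srcPath)
	if err != nil {
		return err
	}
	defer src.Close()

	dst, err := os.Create(dstPath)
	if err != nil {
		return err
	}
	defer dst.Close()

	return reflink.LinkOrCopy(src, dst)
}

func main() {
	if err := copyBlob("layer.tar", "layer-copy.tar"); err != nil {
		fmt.Println("error:", err)
	}
}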
9 vendor/github.com/containers/image/v5/oci/archive/oci_dest.go generated vendored
@@ -14,6 +14,7 @@ import (
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/idtools"
 	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/sirupsen/logrus"
 )
 
@@ -103,6 +104,14 @@ func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool {
 	return d.unpackedDest.SupportsPutBlobPartial()
 }
 
+// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+// The destination can use it in its TryReusingBlob/PutBlob implementations
+// (otherwise it only obtains the final config after all layers are written).
+func (d *ociArchiveImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
+	return d.unpackedDest.NoteOriginalOCIConfig(ociConfig, configErr)
+}
+
 // PutBlobWithOptions writes contents of stream and returns data representing the result.
 // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 // inputInfo.Size is the expected length of stream, if known.
31 vendor/github.com/containers/image/v5/oci/internal/oci_util.go generated vendored
@@ -6,6 +6,7 @@ import (
 	"path/filepath"
 	"regexp"
 	"runtime"
+	"strconv"
 	"strings"
 )
 
@@ -98,7 +99,7 @@ func ValidateScope(scope string) error {
 }
 
 func validateScopeWindows(scope string) error {
-	matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope))
+	matched, _ := regexp.MatchString(`^[a-zA-Z]:\\`, scope)
 	if !matched {
 		return fmt.Errorf("Invalid scope '%s'. Must be an absolute path", scope)
 	}
@@ -119,3 +120,31 @@ func validateScopeNonWindows(scope string) error {
 
 	return nil
 }
+
+// parseOCIReferenceName parses the image from the oci reference.
+func parseOCIReferenceName(image string) (img string, index int, err error) {
+	index = -1
+	if strings.HasPrefix(image, "@") {
+		idx, err := strconv.Atoi(image[1:])
+		if err != nil {
+			return "", index, fmt.Errorf("Invalid source index @%s: not an integer: %w", image[1:], err)
+		}
+		if idx < 0 {
+			return "", index, fmt.Errorf("Invalid source index @%d: must not be negative", idx)
+		}
+		index = idx
+	} else {
+		img = image
+	}
+	return img, index, nil
+}
+
+// ParseReferenceIntoElements splits the oci reference into location, image name and source index if exists
+func ParseReferenceIntoElements(reference string) (string, string, int, error) {
+	dir, image := SplitPathAndImage(reference)
+	image, index, err := parseOCIReferenceName(image)
+	if err != nil {
+		return "", "", -1, err
+	}
+	return dir, image, index, nil
+}
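
The new grammar lets the image part of an `oci:` reference be either a name annotation or a zero-based source index written as `@N`. A standalone sketch of that rule, not part of the diff; the function name is hypothetical and it only mirrors the parsing shown above (needs Go 1.20+ for strings.CutPrefix).

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// splitImageOrIndex mirrors parseOCIReferenceName: "@3" selects the fourth
// manifest in index.json; anything else is treated as an image name annotation.
func splitImageOrIndex(image string) (name string, index int, err error) {
	if rest, ok := strings.CutPrefix(image, "@"); ok {
		index, err = strconv.Atoi(rest)
		if err != nil || index < 0 {
			return "", -1, fmt.Errorf("invalid source index @%s", rest)
		}
		return "", index, nil
	}
	return image, -1, nil
}

func main() {
	fmt.Println(splitImageOrIndex("@1"))     // "" 1 <nil>
	fmt.Println(splitImageOrIndex("latest")) // "latest" -1 <nil>
}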
97 vendor/github.com/containers/image/v5/oci/layout/oci_dest.go generated vendored
@@ -17,6 +17,7 @@ import (
 	"github.com/containers/image/v5/internal/manifest"
 	"github.com/containers/image/v5/internal/private"
 	"github.com/containers/image/v5/internal/putblobdigest"
+	"github.com/containers/image/v5/internal/reflink"
 	"github.com/containers/image/v5/types"
 	"github.com/containers/storage/pkg/fileutils"
 	digest "github.com/opencontainers/go-digest"
@@ -27,6 +28,7 @@ import (
 type ociImageDestination struct {
 	impl.Compat
 	impl.PropertyMethodsInitialize
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize
 	stubs.NoSignaturesInitialize
 
@@ -37,6 +39,9 @@ type ociImageDestination struct {
 
 // newImageDestination returns an ImageDestination for writing to an existing directory.
 func newImageDestination(sys *types.SystemContext, ref ociReference) (private.ImageDestination, error) {
+	if ref.sourceIndex != -1 {
+		return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
+	}
 	var index *imgspecv1.Index
 	if indexExists(ref) {
 		var err error
@@ -137,9 +142,21 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 	if inputInfo.Size != -1 && size != inputInfo.Size {
 		return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
 	}
-	if err := blobFile.Sync(); err != nil {
+
+	if err := d.blobFileSyncAndRename(blobFile, blobDigest, &explicitClosed); err != nil {
 		return private.UploadedBlob{}, err
 	}
+	succeeded = true
+	return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
+}
+
+// blobFileSyncAndRename syncs the specified blobFile on the filesystem and renames it to the
+// specific blob path determined by the blobDigest. The closed pointer indicates to the caller
+// whether blobFile has been closed or not.
+func (d *ociImageDestination) blobFileSyncAndRename(blobFile *os.File, blobDigest digest.Digest, closed *bool) error {
+	if err := blobFile.Sync(); err != nil {
+		return err
+	}
 
 	// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
 	// On Windows, the “permissions of newly created files” argument to syscall.Open is
@@ -147,26 +164,27 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 	// always fails on Windows.
 	if runtime.GOOS != "windows" {
 		if err := blobFile.Chmod(0644); err != nil {
-			return private.UploadedBlob{}, err
+			return err
 		}
 	}
 
 	blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir)
 	if err != nil {
-		return private.UploadedBlob{}, err
+		return err
 	}
 	if err := ensureParentDirectoryExists(blobPath); err != nil {
-		return private.UploadedBlob{}, err
+		return err
 	}
 
-	// need to explicitly close the file, since a rename won't otherwise not work on Windows
+	// need to explicitly close the file, since a rename won't otherwise work on Windows
 	blobFile.Close()
-	explicitClosed = true
+	*closed = true
 
 	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
-		return private.UploadedBlob{}, err
+		return err
 	}
-	succeeded = true
-	return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
+
+	return nil
 }
 
 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
@@ -299,6 +317,67 @@ func (d *ociImageDestination) CommitWithOptions(ctx context.Context, options pri
 	return os.WriteFile(d.ref.indexPath(), indexJSON, 0644)
 }
 
+// PutBlobFromLocalFileOption is unused but may receive functionality in the future.
+type PutBlobFromLocalFileOption struct{}
+
+// PutBlobFromLocalFile arranges the data from path to be used as blob with digest.
+// It computes, and returns, the digest and size of the used file.
+//
+// This function can be used instead of dest.PutBlob() where the ImageDestination requires PutBlob() to be called.
+func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file string, options ...PutBlobFromLocalFileOption) (digest.Digest, int64, error) {
+	d, ok := dest.(*ociImageDestination)
+	if !ok {
+		return "", -1, errors.New("internal error: PutBlobFromLocalFile called with a non-oci: destination")
+	}
+
+	succeeded := false
+	blobFileClosed := false
+	blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
+	if err != nil {
+		return "", -1, err
+	}
+	defer func() {
+		if !blobFileClosed {
+			blobFile.Close()
+		}
+		if !succeeded {
+			os.Remove(blobFile.Name())
+		}
+	}()
+
+	srcFile, err := os.Open(file)
+	if err != nil {
+		return "", -1, err
+	}
+	defer srcFile.Close()
+
+	err = reflink.LinkOrCopy(srcFile, blobFile)
+	if err != nil {
+		return "", -1, err
+	}
+
+	_, err = blobFile.Seek(0, io.SeekStart)
+	if err != nil {
+		return "", -1, err
+	}
+	blobDigest, err := digest.FromReader(blobFile)
+	if err != nil {
+		return "", -1, err
+	}
+
+	fileInfo, err := blobFile.Stat()
+	if err != nil {
+		return "", -1, err
+	}
+
+	if err := d.blobFileSyncAndRename(blobFile, blobDigest, &blobFileClosed); err != nil {
+		return "", -1, err
+	}
+
+	succeeded = true
+	return blobDigest, fileInfo.Size(), nil
+}
+
 func ensureDirectoryExists(path string) error {
 	if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) {
 		if err := os.MkdirAll(path, 0755); err != nil {
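
PutBlobFromLocalFile is new public API on the oci: layout transport. A hedged usage sketch, not part of the diff: the directory and file names are illustrative, the layout directory must already exist, and error handling is reduced to the minimum.

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/oci/layout"
)

// addBlob copies a local file into an existing OCI layout directory as a blob,
// using a reflink clone on filesystems that support it.
func addBlob(ctx context.Context, dir, file string) error {
	ref, err := layout.NewReference(dir, "") // "": no image name annotation needed to write blobs
	if err != nil {
		return err
	}
	dest, err := ref.NewImageDestination(ctx, nil)
	if err != nil {
		return err
	}
	defer dest.Close()

	dgst, size, err := layout.PutBlobFromLocalFile(ctx, dest, file)
	if err != nil {
		return err
	}
	fmt.Println(dgst, size)
	return nil
}

func main() {
	if err := addBlob(context.Background(), "/tmp/mylayout", "layer.tar"); err != nil {
		fmt.Println("error:", err)
	}
}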
75 vendor/github.com/containers/image/v5/oci/layout/oci_transport.go generated vendored
@@ -61,22 +61,31 @@ type ociReference struct {
 	// (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
 	dir         string // As specified by the user. May be relative, contain symlinks, etc.
 	resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
-	// If image=="", it means the "only image" in the index.json is used in the case it is a source
-	// for destinations, the image name annotation "image.ref.name" is not added to the index.json
+	// If image=="" && sourceIndex==-1, it means the "only image" in the index.json is used in the case it is a source
+	// for destinations, the image name annotation "image.ref.name" is not added to the index.json.
+	//
+	// Must not be set if sourceIndex is set (the value is not -1).
 	image string
+	// If not -1, a zero-based index of an image in the manifest index. Valid only for sources.
+	// Must not be set if image is set.
+	sourceIndex int
 }
 
 // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
 func ParseReference(reference string) (types.ImageReference, error) {
-	dir, image := internal.SplitPathAndImage(reference)
-	return NewReference(dir, image)
+	dir, image, index, err := internal.ParseReferenceIntoElements(reference)
+	if err != nil {
+		return nil, err
+	}
+	return newReference(dir, image, index)
}
 
-// NewReference returns an OCI reference for a directory and a image.
+// newReference returns an OCI reference for a directory, and an image name annotation or sourceIndex.
+//
+// If sourceIndex==-1, the index will not be valid to point out the source image, only image will be used.
 // We do not expose an API supplying the resolvedDir; we could, but recomputing it
 // is generally cheap enough that we prefer being confident about the properties of resolvedDir.
-func NewReference(dir, image string) (types.ImageReference, error) {
+func newReference(dir, image string, sourceIndex int) (types.ImageReference, error) {
 	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir)
 	if err != nil {
 		return nil, err
@@ -90,7 +99,26 @@ func NewReference(dir, image string) (types.ImageReference, error) {
 		return nil, err
 	}
 
-	return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil
+	if sourceIndex != -1 && sourceIndex < 0 {
+		return nil, fmt.Errorf("Invalid oci: layout reference: index @%d must not be negative", sourceIndex)
+	}
+	if sourceIndex != -1 && image != "" {
+		return nil, fmt.Errorf("Invalid oci: layout reference: cannot use both an image %s and a source index @%d", image, sourceIndex)
+	}
+	return ociReference{dir: dir, resolvedDir: resolved, image: image, sourceIndex: sourceIndex}, nil
+}
+
+// NewIndexReference returns an OCI reference for a path and a zero-based source manifest index.
+func NewIndexReference(dir string, sourceIndex int) (types.ImageReference, error) {
+	return newReference(dir, "", sourceIndex)
+}
+
+// NewReference returns an OCI reference for a directory and a image.
+//
+// We do not expose an API supplying the resolvedDir; we could, but recomputing it
+// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
+func NewReference(dir, image string) (types.ImageReference, error) {
+	return newReference(dir, image, -1)
 }
 
 func (ref ociReference) Transport() types.ImageTransport {
@@ -103,7 +131,10 @@ func (ref ociReference) Transport() types.ImageTransport {
 // e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
 // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
 func (ref ociReference) StringWithinTransport() string {
-	return fmt.Sprintf("%s:%s", ref.dir, ref.image)
+	if ref.sourceIndex == -1 {
+		return fmt.Sprintf("%s:%s", ref.dir, ref.image)
+	}
+	return fmt.Sprintf("%s:@%d", ref.dir, ref.sourceIndex)
 }
 
 // DockerReference returns a Docker reference associated with this reference
@@ -187,14 +218,18 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, erro
 		return imgspecv1.Descriptor{}, -1, err
 	}
 
-	if ref.image == "" {
-		// return manifest if only one image is in the oci directory
-		if len(index.Manifests) != 1 {
-			// ask user to choose image when more than one image in the oci directory
-			return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage
+	switch {
+	case ref.image != "" && ref.sourceIndex != -1: // Coverage: newReference refuses to create such references.
+		return imgspecv1.Descriptor{}, -1, fmt.Errorf("Internal error: Cannot have both ref %s and source index @%d",
+			ref.image, ref.sourceIndex)
+
+	case ref.sourceIndex != -1:
+		if ref.sourceIndex >= len(index.Manifests) {
+			return imgspecv1.Descriptor{}, -1, fmt.Errorf("index %d is too large, only %d entries available", ref.sourceIndex, len(index.Manifests))
 		}
-		return index.Manifests[0], 0, nil
-	} else {
+		return index.Manifests[ref.sourceIndex], ref.sourceIndex, nil
+
+	case ref.image != "":
 		// if image specified, look through all manifests for a match
 		var unsupportedMIMETypes []string
 		for i, md := range index.Manifests {
@@ -208,8 +243,16 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, erro
 		if len(unsupportedMIMETypes) != 0 {
 			return imgspecv1.Descriptor{}, -1, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes)
 		}
+		return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref}
+
+	default:
+		// return manifest if only one image is in the oci directory
+		if len(index.Manifests) != 1 {
+			// ask user to choose image when more than one image in the oci directory
+			return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage
+		}
+		return index.Manifests[0], 0, nil
 	}
-	return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref}
 }
 
 // LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name
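
With NewIndexReference, a source image in a layout can now be addressed by position instead of name. A hedged sketch, not part of the diff; the path is illustrative and must exist, and the full skopeo-style string would carry an `oci:` prefix that ParseReference does not expect.

package main

import (
	"fmt"

	"github.com/containers/image/v5/oci/layout"
)

func main() {
	// Select the third manifest in /tmp/mylayout/index.json by zero-based index.
	ref, err := layout.NewIndexReference("/tmp/mylayout", 2)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(ref.StringWithinTransport()) // /tmp/mylayout:@2

	// The same image via the string form of the reference:
	if _, err := layout.ParseReference("/tmp/mylayout:@2"); err != nil {
		fmt.Println("error:", err)
	}
}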
52 vendor/github.com/containers/image/v5/oci/layout/reader.go generated vendored Normal file
@@ -0,0 +1,52 @@
+package layout
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/containers/image/v5/types"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// This file is named reader.go for consistency with other transports’
+// handling of “image containers”, but we don’t actually need a stateful reader object.
+
+// ListResult wraps the image reference and the manifest for loading
+type ListResult struct {
+	Reference          types.ImageReference
+	ManifestDescriptor imgspecv1.Descriptor
+}
+
+// List returns a slice of manifests included in the archive
+func List(dir string) ([]ListResult, error) {
+	var res []ListResult
+
+	indexJSON, err := os.ReadFile(filepath.Join(dir, imgspecv1.ImageIndexFile))
+	if err != nil {
+		return nil, err
+	}
+	var index imgspecv1.Index
+	if err := json.Unmarshal(indexJSON, &index); err != nil {
+		return nil, err
+	}
+
+	for manifestIndex, md := range index.Manifests {
+		refName := md.Annotations[imgspecv1.AnnotationRefName]
+		index := -1
+		if refName == "" {
+			index = manifestIndex
+		}
+		ref, err := newReference(dir, refName, index)
+		if err != nil {
+			return nil, fmt.Errorf("error creating image reference: %w", err)
+		}
+		reference := ListResult{
+			Reference:          ref,
+			ManifestDescriptor: md,
+		}
+		res = append(res, reference)
+	}
+	return res, nil
+}
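
layout.List gives callers an enumeration of every manifest in a layout, naming each entry by annotation when present and by index otherwise. A short usage sketch, not part of the diff; the directory path is illustrative.

package main

import (
	"fmt"

	"github.com/containers/image/v5/oci/layout"
)

func main() {
	entries, err := layout.List("/tmp/mylayout")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for _, e := range entries {
		// Named images print as dir:name, anonymous ones as dir:@index.
		fmt.Printf("%s %s %s\n", e.Reference.StringWithinTransport(),
			e.ManifestDescriptor.Digest, e.ManifestDescriptor.MediaType)
	}
}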
9 vendor/github.com/containers/image/v5/openshift/openshift_dest.go generated vendored
@@ -22,6 +22,7 @@ import (
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 type openshiftImageDestination struct {
@@ -111,6 +112,14 @@ func (d *openshiftImageDestination) SupportsPutBlobPartial() bool {
 	return d.docker.SupportsPutBlobPartial()
 }
 
+// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+// The destination can use it in its TryReusingBlob/PutBlob implementations
+// (otherwise it only obtains the final config after all layers are written).
+func (d *openshiftImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
+	return d.docker.NoteOriginalOCIConfig(ociConfig, configErr)
+}
+
 // PutBlobWithOptions writes contents of stream and returns data representing the result.
 // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 // inputInfo.Size is the expected length of stream, if known.
6 vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go generated vendored
@@ -1,6 +1,7 @@
 package sysregistriesv2
 
 import (
+	"errors"
 	"fmt"
 	"io/fs"
 	"os"
@@ -744,6 +745,11 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedC
 			// Enforce v2 format for drop-in-configs.
 			dropIn, err := loadConfigFile(path, true)
 			if err != nil {
+				if errors.Is(err, fs.ErrNotExist) {
+					// file must have been removed between the directory listing
+					// and the open call, ignore that as it is a expected race
+					continue
+				}
 				return nil, fmt.Errorf("loading drop-in registries configuration %q: %w", path, err)
 			}
 			config.updateWithConfigurationFrom(dropIn)
10 vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go generated vendored
@@ -3,6 +3,7 @@ package tlsclientconfig
 import (
 	"crypto/tls"
 	"crypto/x509"
+	"errors"
 	"fmt"
 	"net"
 	"net/http"
@@ -36,12 +37,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
 			logrus.Debugf(" crt: %s", fullPath)
 			data, err := os.ReadFile(fullPath)
 			if err != nil {
-				if os.IsNotExist(err) {
-					// Dangling symbolic link?
-					// Race with someone who deleted the
-					// file after we read the directory's
-					// list of contents?
-					logrus.Warnf("error reading certificate %q: %v", fullPath, err)
+				if errors.Is(err, os.ErrNotExist) {
+					// file must have been removed between the directory listing
+					// and the open call, ignore that as it is a expected race
 					continue
 				}
 				return err
2 vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go generated vendored
@@ -20,7 +20,7 @@ func (f *fulcioTrustRoot) validate() error {
 	return errors.New("fulcio disabled at compile-time")
 }
 
-func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
+func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
 	untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string,
 	untrustedPayloadBytes []byte) (crypto.PublicKey, error) {
 	return nil, errors.New("fulcio disabled at compile-time")
9 vendor/github.com/containers/image/v5/signature/internal/errors.go generated vendored
@@ -13,3 +13,12 @@ func (err InvalidSignatureError) Error() string {
 func NewInvalidSignatureError(msg string) InvalidSignatureError {
 	return InvalidSignatureError{msg: msg}
 }
+
+// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError.
+// All other errors are returned as is.
+func JSONFormatToInvalidSignatureError(err error) error {
+	if formatErr, ok := err.(JSONFormatError); ok {
+		err = NewInvalidSignatureError(formatErr.Error())
+	}
+	return err
+}
9 vendor/github.com/containers/image/v5/signature/internal/rekor_set.go generated vendored
@@ -40,15 +40,6 @@ type UntrustedRekorPayload struct {
 // A compile-time check that UntrustedRekorSET implements json.Unmarshaler
 var _ json.Unmarshaler = (*UntrustedRekorSET)(nil)
 
-// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError.
-// All other errors are returned as is.
-func JSONFormatToInvalidSignatureError(err error) error {
-	if formatErr, ok := err.(JSONFormatError); ok {
-		err = NewInvalidSignatureError(formatErr.Error())
-	}
-	return err
-}
-
 // UnmarshalJSON implements the json.Unmarshaler interface
 func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
 	return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
2 vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go generated vendored
@@ -10,6 +10,6 @@ import (
 
 // VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data.
 // Returns bundle upload time on success.
-func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
+func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
 	return time.Time{}, NewInvalidSignatureError("rekor disabled at compile-time")
 }
580 vendor/github.com/containers/image/v5/storage/storage_dest.go generated vendored
@@ -17,11 +17,13 @@ import (
 	"sync/atomic"
 
 	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/internal/image"
 	"github.com/containers/image/v5/internal/imagedestination/impl"
 	"github.com/containers/image/v5/internal/imagedestination/stubs"
+	srcImpl "github.com/containers/image/v5/internal/imagesource/impl"
+	srcStubs "github.com/containers/image/v5/internal/imagesource/stubs"
 	"github.com/containers/image/v5/internal/private"
 	"github.com/containers/image/v5/internal/putblobdigest"
 	"github.com/containers/image/v5/internal/set"
 	"github.com/containers/image/v5/internal/signature"
 	"github.com/containers/image/v5/internal/tmpdir"
 	"github.com/containers/image/v5/manifest"
@@ -31,6 +33,7 @@ import (
 	graphdriver "github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/chunked"
+	"github.com/containers/storage/pkg/chunked/toc"
 	"github.com/containers/storage/pkg/ioutils"
 	digest "github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -57,8 +60,9 @@ type storageImageDestination struct {
 	imageRef       storageReference
 	directory      string       // Temporary directory where we store blobs until Commit() time
 	nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs
-	manifest       []byte        // Manifest contents, temporary
-	manifestDigest digest.Digest // Valid if len(manifest) != 0
+	manifest         []byte        // (Per-instance) manifest contents, or nil if not yet known.
+	manifestMIMEType string        // Valid if manifest != nil
+	manifestDigest   digest.Digest // Valid if manifest != nil
 	untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs (not even validated to be valid digest.Digest!); or nil if not read yet
 	signatures   []byte                   // Signature contents, temporary
 	signatureses map[digest.Digest][]byte // Instance signature contents, temporary
@@ -108,8 +112,10 @@ type storageImageDestinationLockProtected struct {
 	//
 	// Ideally we wouldn’t have blobDiffIDs, and we would just keep records by index, but the public API does not require the caller
 	// to provide layer indices; and configs don’t have layer indices. blobDiffIDs needs to exist for those cases.
-	indexToDiffID    map[int]digest.Digest // Mapping from layer index to DiffID
-	indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest
+	indexToDiffID map[int]digest.Digest // Mapping from layer index to DiffID
+	// Mapping from layer index to a TOC Digest.
+	// If this is set, then either c/storage/pkg/chunked/toc.GetTOCDigest must have returned a value, or indexToDiffID must be set as well.
+	indexToTOCDigest map[int]digest.Digest
 	blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs. CAREFUL: See the WARNING above.
 
 	// Layer data: Before commitLayer is called, either at least one of (diffOutputs, indexToAdditionalLayer, filenames)
@@ -121,6 +127,9 @@ type storageImageDestinationLockProtected struct {
 	filenames map[digest.Digest]string
 	// Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set.
 	fileSizes map[digest.Digest]int64
+
+	// Config
+	configDigest digest.Digest // "" if N/A or not known yet.
 }
 
 // addedLayerInfo records data about a layer to use in this image.
@@ -201,6 +210,18 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
 	return filepath.Join(s.directory, fmt.Sprintf("%d", s.nextTempFileID.Add(1)))
 }
 
+// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+// The destination can use it in its TryReusingBlob/PutBlob implementations
+// (otherwise it only obtains the final config after all layers are written).
+func (s *storageImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
+	if configErr != nil {
+		return fmt.Errorf("writing to c/storage without a valid image config: %w", configErr)
+	}
+	s.setUntrustedDiffIDValuesFromOCIConfig(ociConfig)
+	return nil
+}
+
 // PutBlobWithOptions writes contents of stream and returns data representing the result.
 // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 // inputInfo.Size is the expected length of stream, if known.
@@ -214,7 +235,17 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream
 		return info, err
 	}
 
-	if options.IsConfig || options.LayerIndex == nil {
+	if options.IsConfig {
+		s.lock.Lock()
+		defer s.lock.Unlock()
+		if s.lockProtected.configDigest != "" {
+			return private.UploadedBlob{}, fmt.Errorf("after config %q, refusing to record another config %q",
+				s.lockProtected.configDigest.String(), info.Digest.String())
+		}
+		s.lockProtected.configDigest = info.Digest
+		return info, nil
+	}
+	if options.LayerIndex == nil {
 		return info, nil
 	}
 
@@ -315,6 +346,56 @@ func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.Read
 // If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
 // The fallback _must not_ be done otherwise.
 func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (_ private.UploadedBlob, retErr error) {
+	inputTOCDigest, err := toc.GetTOCDigest(srcInfo.Annotations)
+	if err != nil {
+		return private.UploadedBlob{}, err
+	}
+
+	// The identity of partially-pulled layers is, as long as we keep compatibility with tar-like consumers,
+	// unfixably ambiguous: there are two possible “views” of the same file (same compressed digest),
+	// the traditional “view” that decompresses the primary stream and consumes a tar file,
+	// and the partial-pull “view” that starts with the TOC.
+	// The two “views” have two separate metadata sets and may refer to different parts of the blob for file contents;
+	// the direct way to ensure they are consistent would be to read the full primary stream (and authenticate it against
+	// the compressed digest), and ensure the metadata and layer contents exactly match the partially-pulled contents -
+	// making the partial pull completely pointless.
+	//
+	// Instead, for partial-pull-capable layers (with inputTOCDigest set), we require the image to “commit”
+	// to uncompressed layer digest values via the config's RootFS.DiffIDs array:
+	// they are already naturally computed for traditionally-pulled layers, and for partially-pulled layers we
+	// do the optimal partial pull, and then reconstruct the uncompressed tar stream just to (expensively) compute this digest.
+	//
+	// Layers which don’t support partial pulls (inputTOCDigest == "", incl. all schema1 layers) can be let through:
+	// the partial pull code will either not engage, or consume the full layer; and the rules of indexToTOCDigest / layerIdentifiedByTOC
+	// ensure the layer is identified by DiffID, i.e. using the traditional “view”.
+	//
+	// But if inputTOCDigest is set and the input image doesn't have RootFS.DiffIDs (the config is invalid for schema2/OCI),
+	// don't allow a partial pull, and fall back to PutBlobWithOptions.
+	//
+	// (The user can opt out of the DiffID commitment checking by a c/storage option, giving up security for performance,
+	// but we will still trigger the fall back here, and we will still enforce a DiffID match, so that the set of accepted images
+	// is the same in both cases, and so that users are not tempted to set the c/storage option to allow accepting some invalid images.)
+	var untrustedDiffID digest.Digest // "" if unknown
+	udid, err := s.untrustedLayerDiffID(options.LayerIndex)
+	if err != nil {
+		var diffIDUnknownErr untrustedLayerDiffIDUnknownError
+		switch {
+		case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
+			// PutBlobPartial is a private API, so all callers are within c/image, and should have called
+			// NoteOriginalOCIConfig first.
+			return private.UploadedBlob{}, fmt.Errorf("internal error: in PutBlobPartial, untrustedLayerDiffID returned errUntrustedLayerDiffIDNotYetAvailable")
+		case errors.As(err, &diffIDUnknownErr):
+			if inputTOCDigest != nil {
+				return private.UploadedBlob{}, private.NewErrFallbackToOrdinaryLayerDownload(err)
+			}
+			untrustedDiffID = "" // A schema1 image or a non-TOC layer with no ambiguity, let it through
+		default:
+			return private.UploadedBlob{}, err
+		}
+	} else {
+		untrustedDiffID = udid
+	}
+
 	fetcher := zstdFetcher{
 		chunkAccessor: chunkAccessor,
 		ctx:           ctx,
@@ -351,35 +432,55 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
 	blobDigest := srcInfo.Digest
 
 	s.lock.Lock()
-	if out.UncompressedDigest != "" {
-		s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest
-		if out.TOCDigest != "" {
-			options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest)
-		}
-		// Don’t set indexToTOCDigest on this path:
-		// - Using UncompressedDigest allows image reuse with non-partially-pulled layers, so we want to set indexToDiffID.
-		// - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch.
-		//   That TOC is quite unlikely to match any other TOC value.
+	if err := func() error { // A scope for defer
+		defer s.lock.Unlock()
 
-		// The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is
-		// responsible for ensuring blobDigest has been validated.
-		if out.CompressedDigest != blobDigest {
-			return private.UploadedBlob{}, fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q",
-				out.CompressedDigest, blobDigest)
+		// For true partial pulls, c/storage decides whether to compute the uncompressed digest based on an option in storage.conf
+		// (defaults to true, to avoid ambiguity.)
+		// c/storage can also be configured, to consume a layer not prepared for partial pulls (primarily to allow composefs conversion),
+		// and in that case it always consumes the full blob and always computes the uncompressed digest.
+		if out.UncompressedDigest != "" {
+			// This is centrally enforced later, in commitLayer, but because we have the value available,
+			// we might just as well check immediately.
+			if untrustedDiffID != "" && out.UncompressedDigest != untrustedDiffID {
+				return fmt.Errorf("uncompressed digest of layer %q is %q, config claims %q", srcInfo.Digest.String(),
+					out.UncompressedDigest.String(), untrustedDiffID.String())
+			}
+
+			s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest
+			if out.TOCDigest != "" {
+				s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
+				options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest)
+			}
+
+			// If the whole layer has been consumed, chunked.GetDiffer is responsible for ensuring blobDigest has been validated.
+			if out.CompressedDigest != "" {
+				if out.CompressedDigest != blobDigest {
+					return fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q",
+						out.CompressedDigest, blobDigest)
+				}
+				// So, record also information about blobDigest, that might benefit reuse.
+				// We trust PrepareStagedLayer to validate or create both values correctly.
+				s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
+				options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest)
+			}
+		} else {
+			// Sanity-check the defined rules for indexToTOCDigest.
+			if inputTOCDigest == nil {
+				return fmt.Errorf("internal error: PrepareStagedLayer returned a TOC-only identity for layer %q with no TOC digest", srcInfo.Digest.String())
+			}
+
+			// Use diffID for layer identity if it is known.
+			if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" {
+				s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest
+			}
+			s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
 		}
-		// So, record also information about blobDigest, that might benefit reuse.
-		// We trust PrepareStagedLayer to validate or create both values correctly.
-		s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
-		options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest)
-	} else {
-		// Use diffID for layer identity if it is known.
-		if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" {
-			s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest
-		}
-		s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
+		s.lockProtected.diffOutputs[options.LayerIndex] = out
+		return nil
+	}(); err != nil {
+		return private.UploadedBlob{}, err
 	}
-	s.lockProtected.diffOutputs[options.LayerIndex] = out
-	s.lock.Unlock()
 
 	succeeded = true
 	return private.UploadedBlob{
@@ -417,22 +518,43 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
 	if err := blobDigest.Validate(); err != nil {
 		return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
 	}
-	if options.TOCDigest != "" {
+	useTOCDigest := false // If set, (options.TOCDigest != "" && options.LayerIndex != nil) AND we can use options.TOCDigest safely.
+	if options.TOCDigest != "" && options.LayerIndex != nil {
 		if err := options.TOCDigest.Validate(); err != nil {
 			return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
 		}
+		// Only consider using TOCDigest if we can avoid ambiguous image “views”, see the detailed comment in PutBlobPartial.
+		_, err := s.untrustedLayerDiffID(*options.LayerIndex)
+		if err != nil {
+			var diffIDUnknownErr untrustedLayerDiffIDUnknownError
+			switch {
+			case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
+				// options.TOCDigest is a private API, so all callers are within c/image, and should have called
+				// NoteOriginalOCIConfig first.
+				return false, private.ReusedBlob{}, fmt.Errorf("internal error: in TryReusingBlobWithOptions, untrustedLayerDiffID returned errUntrustedLayerDiffIDNotYetAvailable")
+			case errors.As(err, &diffIDUnknownErr):
+				logrus.Debugf("Not using TOC %q to look for layer reuse: %v", options.TOCDigest, err)
+				// But don’t abort entirely, keep useTOCDigest = false, try a blobDigest match.
+			default:
+				return false, private.ReusedBlob{}, err
+			}
+		} else {
+			useTOCDigest = true
+		}
 	}
 
 	// lock the entire method as it executes fairly quickly
 	s.lock.Lock()
 	defer s.lock.Unlock()
 
-	if options.SrcRef != nil && options.TOCDigest != "" && options.LayerIndex != nil {
+	if options.SrcRef != nil && useTOCDigest {
 		// Check if we have the layer in the underlying additional layer store.
 		aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(options.TOCDigest, options.SrcRef.String())
 		if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
 			return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobDigest, err)
 		} else if err == nil {
+			// Compare the long comment in PutBlobPartial. We assume that the Additional Layer Store will, somehow,
+			// avoid layer “view” ambiguity.
 			alsTOCDigest := aLayer.TOCDigest()
 			if alsTOCDigest != options.TOCDigest {
 				// FIXME: If alsTOCDigest is "", the Additional Layer Store FUSE server is probably just too old, and we could
@@ -505,13 +627,13 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
 			return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
 		}
 		if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
-			s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest
+			s.lockProtected.blobDiffIDs[reused.Digest] = uncompressedDigest
 			return true, reused, nil
 		}
 		}
 	}
 
-	if options.TOCDigest != "" && options.LayerIndex != nil {
+	if useTOCDigest {
 		// Check if we know which which UncompressedDigest the TOC digest resolves to, and we have a match for that.
 		// Prefer this over LayersByTOCDigest because we can identify the layer using UncompressedDigest, maximizing reuse.
 		uncompressedDigest := options.Cache.UncompressedDigestForTOC(options.TOCDigest)
@@ -532,6 +654,11 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
 		return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, options.TOCDigest, err)
 		}
 		if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
+			if uncompressedDigest == "" && layers[0].UncompressedDigest != "" {
+				// Determine an uncompressed digest if at all possible, to use a traditional image ID
+				// and to maximize image reuse.
+				uncompressedDigest = layers[0].UncompressedDigest
+			}
 			if uncompressedDigest != "" {
 				s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest
 			}
@@ -568,13 +695,22 @@ func reusedBlobFromLayerLookup(layers []storage.Layer, blobDigest digest.Digest,
 
 // trustedLayerIdentityData is a _consistent_ set of information known about a single layer.
 type trustedLayerIdentityData struct {
-	layerIdentifiedByTOC bool // true if we decided the layer should be identified by tocDigest, false if by diffID
+	// true if we decided the layer should be identified by tocDigest, false if by diffID
+	// This can only be true if c/storage/pkg/chunked/toc.GetTOCDigest returns a value.
+	layerIdentifiedByTOC bool
 
 	diffID     digest.Digest // A digest of the uncompressed full contents of the layer, or "" if unknown; must be set if !layerIdentifiedByTOC
 	tocDigest  digest.Digest // A digest of the TOC digest, or "" if unknown; must be set if layerIdentifiedByTOC
 	blobDigest digest.Digest // A digest of the (possibly-compressed) layer as presented, or "" if unknown/untrusted.
 }
 
+// logString() prints a representation of trusted suitable identifying a layer in logs and errors.
+// The string is already quoted to expose malicious input and does not need to be quoted again.
+// Note that it does not include _all_ of the contents.
+func (trusted trustedLayerIdentityData) logString() string {
+	return fmt.Sprintf("%q/%q/%q", trusted.blobDigest, trusted.tocDigest, trusted.diffID)
+}
+
 // trustedLayerIdentityDataLocked returns a _consistent_ set of information for a layer with (layerIndex, blobDigest).
 // blobDigest is the (possibly-compressed) layer digest referenced in the manifest.
 // It returns (trusted, true) if the layer was found, or (_, false) if insufficient data is available.
@@ -785,23 +921,6 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
 	return nil
 }
 
-// singleLayerIDComponent returns a single layer’s the input to computing a layer (chain) ID,
-// and an indication whether the input already has the shape of a layer ID.
-// It returns ("", false) if the layer is not found at all (which should never happen)
-func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDigest digest.Digest) (string, bool) {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	trusted, ok := s.trustedLayerIdentityDataLocked(layerIndex, blobDigest)
-	if !ok {
-		return "", false
-	}
-	if trusted.layerIdentifiedByTOC {
-		return "@TOC=" + trusted.tocDigest.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
-	}
-	return trusted.diffID.Encoded(), true // This looks like chain IDs, and it uses the traditional value.
-}
-
 // commitLayer commits the specified layer with the given index to the storage.
 // size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
 //
@@ -813,16 +932,15 @@ func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDig
 // must guarantee that, at any given time, at most one goroutine may execute
 // `commitLayer()`.
 func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) (bool, error) {
 	// Already committed? Return early.
 	if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
 		return false, nil
 	}
 
-	// Start with an empty string or the previous layer ID. Note that
-	// `s.indexToStorageID` can only be accessed by *one* goroutine at any
-	// given time. Hence, we don't need to lock accesses.
-	var parentLayer string
+	var parentLayer string // "" if no parent
 	if index != 0 {
+		// s.indexToStorageID can only be written by this function, and our caller
+		// is responsible for ensuring it can be only be called by *one* goroutine at any
+		// given time. Hence, we don't need to lock accesses.
 		prev, ok := s.indexToStorageID[index-1]
 		if !ok {
 			return false, fmt.Errorf("Internal error: commitLayer called with previous layer %d not committed yet", index-1)
@@ -830,18 +948,17 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
 		parentLayer = prev
 	}
 
 	// Carry over the previous ID for empty non-base layers.
 	if info.emptyLayer {
 		s.indexToStorageID[index] = parentLayer
 		return false, nil
 	}
 
-	// Check if there's already a layer with the ID that we'd give to the result of applying
-	// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
-	// The layerID refers either to the DiffID or the digest of the TOC.
-	layerIDComponent, layerIDComponentStandalone := s.singleLayerIDComponent(index, info.digest)
-	if layerIDComponent == "" {
-		// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob() / TryReusingBlob() / …
+	// Collect trusted parameters of the layer.
+	s.lock.Lock()
+	trusted, ok := s.trustedLayerIdentityDataLocked(index, info.digest)
+	s.lock.Unlock()
+	if !ok {
+		// Check if the layer exists already and the caller just (incorrectly) forgot to pass it to us in a PutBlob() / TryReusingBlob() / …
+		//
 		// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache: a caller
 		// that relies on using a blob digest that has never been seen by the store had better call
@@ -865,23 +982,54 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
 		return false, fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String())
 	}
 
-	layerIDComponent, layerIDComponentStandalone = s.singleLayerIDComponent(index, info.digest)
-	if layerIDComponent == "" {
+		s.lock.Lock()
+		trusted, ok = s.trustedLayerIdentityDataLocked(index, info.digest)
+		s.lock.Unlock()
+		if !ok {
 			return false, fmt.Errorf("we have blob %q, but don't know its layer ID", info.digest.String())
 		}
 	}
 
-	id := layerIDComponent
-	if !layerIDComponentStandalone || parentLayer != "" {
-		id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Encoded()
+	// Ensure that we always see the same “view” of a layer, as identified by the layer’s uncompressed digest,
+	// unless the user has explicitly opted out of this in storage.conf: see the more detailed explanation in PutBlobPartial.
+	if trusted.diffID != "" {
+		untrustedDiffID, err := s.untrustedLayerDiffID(index)
+		if err != nil {
+			var diffIDUnknownErr untrustedLayerDiffIDUnknownError
+			switch {
+			case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
+				logrus.Debugf("Skipping commit for layer %q, manifest not yet available for DiffID check", index)
+				return true, nil
+			case errors.As(err, &diffIDUnknownErr):
+				// If untrustedLayerDiffIDUnknownError, the input image is schema1, has no TOC annotations,
+				// so we could not have reused a TOC-identified layer nor have done a TOC-identified partial pull,
+				// i.e. there is no other “view” to worry about. Sanity-check that we really see the only expected view.
+				//
+				// Or, maybe, the input image is OCI, and has invalid/missing DiffID values in config. In that case
+				// we _must_ fail if we used a TOC-identified layer - but PutBlobPartial should have already
+				// refused to do a partial pull, so we are in an inconsistent state.
+				if trusted.layerIdentifiedByTOC {
+					return false, fmt.Errorf("internal error: layer %d for blob %s was identified by TOC, but we don't have a DiffID in config",
+						index, trusted.logString())
+				}
+				// else a schema1 image or a non-TOC layer with no ambiguity, let it through
+			default:
+				return false, err
+			}
+		} else if trusted.diffID != untrustedDiffID {
+			return false, fmt.Errorf("layer %d (blob %s) does not match config's DiffID %q", index, trusted.logString(), untrustedDiffID)
+		}
 	}
 
+	id := layerID(parentLayer, trusted)
+
 	if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
 		// There's already a layer that should have the right contents, just reuse it.
 		s.indexToStorageID[index] = layer.ID
 		return false, nil
 	}
 
-	layer, err := s.createNewLayer(index, info.digest, parentLayer, id)
+	layer, err := s.createNewLayer(index, trusted, parentLayer, id)
 	if err != nil {
 		return false, err
 	}
@@ -892,32 +1040,62 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
 	return false, nil
 }
 
-// createNewLayer creates a new layer newLayerID for (index, layerDigest) on top of parentLayer (which may be "").
+// layerID computes a layer (“chain”) ID for (a possibly-empty parentID, trusted)
+func layerID(parentID string, trusted trustedLayerIdentityData) string {
+	var component string
+	mustHash := false
+	if trusted.layerIdentifiedByTOC {
+		// "@" is not a valid start of a digest.Digest.Encoded(), so this is unambiguous with the !layerIdentifiedByTOC case.
+		// But we _must_ hash this below to get a Digest.Encoded()-formatted value.
+		component = "@TOC=" + trusted.tocDigest.Encoded()
+		mustHash = true
+	} else {
+		component = trusted.diffID.Encoded() // This looks like chain IDs, and it uses the traditional value.
+	}
+
+	if parentID == "" && !mustHash {
+		return component
+	}
+	return digest.Canonical.FromString(parentID + "+" + component).Encoded()
+}
+
+// createNewLayer creates a new layer newLayerID for (index, trusted) on top of parentLayer (which may be "").
 // If the layer cannot be committed yet, the function returns (nil, nil).
-func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.Digest, parentLayer, newLayerID string) (*storage.Layer, error) {
+func (s *storageImageDestination) createNewLayer(index int, trusted trustedLayerIdentityData, parentLayer, newLayerID string) (*storage.Layer, error) {
 	s.lock.Lock()
 	diffOutput, ok := s.lockProtected.diffOutputs[index]
 	s.lock.Unlock()
 	if ok {
-		// If we know a trusted DiffID value (e.g. from a BlobInfoCache), set it in diffOutput.
+		// Typically, we compute a trusted DiffID value to authenticate the layer contents, see the detailed explanation
+		// in PutBlobPartial. If the user has opted out of that, but we know a trusted DiffID value
+		// (e.g. from a BlobInfoCache), set it in diffOutput.
 		// That way it will be persisted in storage even if the cache is deleted; also
-		// we can use the value below to avoid the untrustedUncompressedDigest logic (and notably
-		// the costly commit delay until a manifest is available).
-		s.lock.Lock()
-		if d, ok := s.lockProtected.indexToDiffID[index]; ok {
-			diffOutput.UncompressedDigest = d
+		// we can use the value below to avoid the untrustedUncompressedDigest logic.
+		if diffOutput.UncompressedDigest == "" && trusted.diffID != "" {
+			diffOutput.UncompressedDigest = trusted.diffID
 		}
-		s.lock.Unlock()
 
 		var untrustedUncompressedDigest digest.Digest
 		if diffOutput.UncompressedDigest == "" {
 			d, err := s.untrustedLayerDiffID(index)
 			if err != nil {
-				return nil, err
-			}
-			if d == "" {
-				logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID)
-				return nil, nil
+				var diffIDUnknownErr untrustedLayerDiffIDUnknownError
+				switch {
+				case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
+					logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID)
+					return nil, nil
+				case errors.As(err, &diffIDUnknownErr):
+					// If untrustedLayerDiffIDUnknownError, the input image is schema1, has no TOC annotations,
+					// so we should have !trusted.layerIdentifiedByTOC, i.e. we should have set
+					// diffOutput.UncompressedDigest above in this function, at the very latest.
+					//
+					// Or, maybe, the input image is OCI, and has invalid/missing DiffID values in config. In that case
+					// commitLayer should have already refused this image when dealing with the “view” ambiguity.
+					return nil, fmt.Errorf("internal error: layer %d for blob %s was partially-pulled with unknown UncompressedDigest, but we don't have a DiffID in config",
+						index, trusted.logString())
+				default:
+					return nil, err
+				}
 			}
 
 			untrustedUncompressedDigest = d
@@ -965,19 +1143,17 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
|
||||
// then we need to read the desired contents from a layer.
|
||||
var filename string
|
||||
var gotFilename bool
|
||||
s.lock.Lock()
|
||||
trusted, ok := s.trustedLayerIdentityDataLocked(index, layerDigest)
|
||||
if ok && trusted.blobDigest != "" {
|
||||
if trusted.blobDigest != "" {
|
||||
s.lock.Lock()
|
||||
filename, gotFilename = s.lockProtected.filenames[trusted.blobDigest]
|
||||
}
|
||||
s.lock.Unlock()
|
||||
if !ok { // We have already determined newLayerID, so the data must have been available.
|
||||
return nil, fmt.Errorf("internal inconsistency: layer (%d, %q) not found", index, layerDigest)
|
||||
s.lock.Unlock()
|
||||
}
|
||||
var trustedOriginalDigest digest.Digest // For storage.LayerOptions
|
||||
var trustedOriginalSize *int64
|
||||
if gotFilename {
|
||||
// The code setting .filenames[trusted.blobDigest] is responsible for ensuring that the file contents match trusted.blobDigest.
|
||||
trustedOriginalDigest = trusted.blobDigest
|
||||
trustedOriginalSize = nil // It’s s.lockProtected.fileSizes[trusted.blobDigest], but we don’t hold the lock now, and the consumer can compute it at trivial cost.
|
||||
} else {
|
||||
// Try to find the layer with contents matching the data we use.
|
||||
var layer *storage.Layer // = nil
|
||||
@@ -997,7 +1173,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
|
||||
}
|
||||
}
|
||||
if layer == nil {
|
||||
return nil, fmt.Errorf("layer for blob %q/%q/%q not found", trusted.blobDigest, trusted.tocDigest, trusted.diffID)
|
||||
return nil, fmt.Errorf("layer for blob %s not found", trusted.logString())
|
||||
}
|
||||
|
||||
// Read the layer's contents.
|
||||
@@ -1007,7 +1183,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
|
||||
}
|
||||
diff, err2 := s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
|
||||
if err2 != nil {
|
||||
return nil, fmt.Errorf("reading layer %q for blob %q/%q/%q: %w", layer.ID, trusted.blobDigest, trusted.tocDigest, trusted.diffID, err2)
|
||||
return nil, fmt.Errorf("reading layer %q for blob %s: %w", layer.ID, trusted.logString(), err2)
|
||||
}
|
||||
// Copy the layer diff to a file. Diff() takes a lock that it holds
|
||||
// until the ReadCloser that it returns is closed, and PutLayer() wants
|
||||
@@ -1032,22 +1208,36 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
|
||||
if trusted.diffID == "" && layer.UncompressedDigest != "" {
|
||||
trusted.diffID = layer.UncompressedDigest // This data might have been unavailable in tryReusingBlobAsPending, and is only known now.
|
||||
}
|
||||
// The stream we have is uncompressed, and it matches trusted.diffID (if known).
|
||||
|
||||
// Set the layer’s CompressedDigest/CompressedSize to relevant values if known, to allow more layer reuse.
|
||||
// But we don’t want to just use the size from the manifest if we never saw the compressed blob,
|
||||
// so that we don’t propagate mistakes / attacks.
|
||||
//
|
||||
// FIXME? trustedOriginalDigest could be set to trusted.blobDigest if known, to allow more layer reuse.
|
||||
// But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created
|
||||
// layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream).
|
||||
//
|
||||
// We can legitimately set storage.LayerOptions.OriginalDigest to "",
|
||||
// but that would just result in PutLayer computing the digest of the input stream == trusted.diffID.
|
||||
// So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
|
||||
trustedOriginalDigest = trusted.diffID
|
||||
// s.lockProtected.fileSizes[trusted.blobDigest] is not set, otherwise we would have found gotFilename.
|
||||
// So, check if the layer we found contains that metadata. (If that layer continues to exist, there’s no benefit
|
||||
// to us propagating the metadata; but that layer could be removed, and in that case propagating the metadata to
|
||||
// this new layer copy can help.)
|
||||
if trusted.blobDigest != "" && layer.CompressedDigest == trusted.blobDigest && layer.CompressedSize > 0 {
|
||||
trustedOriginalDigest = trusted.blobDigest
|
||||
sizeCopy := layer.CompressedSize
|
||||
trustedOriginalSize = &sizeCopy
|
||||
} else {
|
||||
// The stream we have is uncompressed, and it matches trusted.diffID (if known).
|
||||
//
|
||||
// We can legitimately set storage.LayerOptions.OriginalDigest to "",
|
||||
// but that would just result in PutLayer computing the digest of the input stream == trusted.diffID.
|
||||
// So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
|
||||
trustedOriginalDigest = trusted.diffID
|
||||
trustedOriginalSize = nil // Probably layer.UncompressedSize, but the consumer can compute it at trivial cost.
|
||||
}
|
||||
|
||||
// Allow using the already-collected layer contents without extracting the layer again.
|
||||
//
|
||||
// This only matches against the uncompressed digest.
|
||||
// We don’t have the original compressed data here to trivially set filenames[layerDigest].
|
||||
// In particular we can’t achieve the correct Layer.CompressedSize value with the current c/storage API.
|
||||
// If we have trustedOriginalDigest == trusted.blobDigest, we could arrange to reuse the
|
||||
// same uncompressed stream for future calls of createNewLayer; but for the non-layer blobs (primarily the config),
|
||||
// we assume that the file at filenames[someDigest] matches someDigest _exactly_; we would need to differentiate
|
||||
// between “original files” and “possibly uncompressed files”.
|
||||
// Within-image layer reuse is probably very rare, for now we prefer to avoid that complexity.
|
||||
if trusted.diffID != "" {
|
||||
s.lock.Lock()
|
||||
@@ -1067,55 +1257,128 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
|
||||
layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{
|
||||
OriginalDigest: trustedOriginalDigest,
|
||||
OriginalSize: trustedOriginalSize, // nil in many cases
|
||||
// This might be "" if trusted.layerIdentifiedByTOC; in that case PutLayer will compute the value from the stream.
|
||||
UncompressedDigest: trusted.diffID,
|
||||
}, file)
|
||||
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
|
||||
return nil, fmt.Errorf("adding layer with blob %q/%q/%q: %w", trusted.blobDigest, trusted.tocDigest, trusted.diffID, err)
|
||||
return nil, fmt.Errorf("adding layer with blob %s: %w", trusted.logString(), err)
|
||||
}
|
||||
return layer, nil
|
||||
}
|
||||
|
||||
// untrustedLayerDiffID returns a DiffID value for layerIndex from the image’s config.
|
||||
// If the value is not yet available (but it can be available after s.manifets is set), it returns ("", nil).
|
||||
// WARNING: We don’t validate the DiffID value against the layer contents; it must not be used for any deduplication.
|
||||
func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) {
|
||||
// At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob, and
|
||||
// nothing is writing to s.manifest yet, or PutManifest has been called and s.manifest != nil.
|
||||
// Either way this function does not need the protection of s.lock.
|
||||
if s.manifest == nil {
|
||||
return "", nil
|
||||
// uncommittedImageSource allows accessing an image’s metadata (not layers) before it has been committed,
|
||||
// to allow using image.FromUnparsedImage.
|
||||
type uncommittedImageSource struct {
|
||||
srcImpl.Compat
|
||||
srcImpl.PropertyMethodsInitialize
|
||||
srcImpl.NoSignatures
|
||||
srcImpl.DoesNotAffectLayerInfosForCopy
|
||||
srcStubs.NoGetBlobAtInitialize
|
||||
|
||||
d *storageImageDestination
|
||||
}
|
||||
|
||||
func newUncommittedImageSource(d *storageImageDestination) *uncommittedImageSource {
|
||||
s := &uncommittedImageSource{
|
||||
PropertyMethodsInitialize: srcImpl.PropertyMethods(srcImpl.Properties{
|
||||
HasThreadSafeGetBlob: true,
|
||||
}),
|
||||
NoGetBlobAtInitialize: srcStubs.NoGetBlobAt(d.Reference()),
|
||||
|
||||
d: d,
|
||||
}
|
||||
s.Compat = srcImpl.AddCompat(s)
|
||||
return s
|
||||
}
|
||||
|
||||
func (u *uncommittedImageSource) Reference() types.ImageReference {
|
||||
return u.d.Reference()
|
||||
}
|
||||
|
||||
func (u *uncommittedImageSource) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *uncommittedImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
|
||||
return u.d.manifest, u.d.manifestMIMEType, nil
|
||||
}
|
||||
|
||||
func (u *uncommittedImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
|
||||
blob, err := u.d.getConfigBlob(info)
|
||||
if err != nil {
|
||||
return nil, -1, err
|
||||
}
|
||||
return io.NopCloser(bytes.NewReader(blob)), int64(len(blob)), nil
|
||||
}
|
||||
|
||||
// errUntrustedLayerDiffIDNotYetAvailable is returned by untrustedLayerDiffID
|
||||
// if the value is not yet available (but it can be available after s.manifests is set).
|
||||
// This should only happen for external callers of the transport, not for c/image/copy.
|
||||
//
|
||||
// Callers of untrustedLayerDiffID before PutManifest must handle this error specially;
|
||||
// callers after PutManifest can use the default, reporting an internal error.
|
||||
var errUntrustedLayerDiffIDNotYetAvailable = errors.New("internal error: untrustedLayerDiffID has no value available and fallback was not implemented")
|
||||
|
||||
// untrustedLayerDiffIDUnknownError is returned by untrustedLayerDiffID
|
||||
// if the image’s format does not provide DiffIDs.
|
||||
type untrustedLayerDiffIDUnknownError struct {
|
||||
layerIndex int
|
||||
}
|
||||
|
||||
func (e untrustedLayerDiffIDUnknownError) Error() string {
|
||||
return fmt.Sprintf("DiffID value for layer %d is unknown or explicitly empty", e.layerIndex)
|
||||
}
|
||||
|
||||
// untrustedLayerDiffID returns a DiffID value for layerIndex from the image’s config.
|
||||
// It may return two special errors, errUntrustedLayerDiffIDNotYetAvailable or untrustedLayerDiffIDUnknownError.
|
||||
//
|
||||
// WARNING: This function does not even validate that the returned digest has a valid format.
|
||||
// WARNING: We don’t _always_ validate this DiffID value against the layer contents; it must not be used for any deduplication.
|
||||
func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) {
|
||||
// At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob,
|
||||
// nothing is writing to s.manifest yet, and s.untrustedDiffIDValues might have been set
|
||||
// by NoteOriginalOCIConfig and are not being updated any more;
|
||||
// or PutManifest has been called and s.manifest != nil.
|
||||
// Either way this function does not need the protection of s.lock.
|
||||
|
||||
if s.untrustedDiffIDValues == nil {
|
||||
mt := manifest.GuessMIMEType(s.manifest)
|
||||
if mt != imgspecv1.MediaTypeImageManifest {
|
||||
// We could, in principle, build an ImageSource, support arbitrary image formats using image.FromUnparsedImage,
|
||||
// and then use types.Image.OCIConfig so that we can parse the image.
|
||||
//
|
||||
// In practice, this should, right now, only matter for pulls of OCI images (this code path implies that a layer has annotation),
|
||||
// while converting to a non-OCI formats, using a manual (skopeo copy) or something similar, not (podman pull).
|
||||
// So it is not implemented yet.
|
||||
return "", fmt.Errorf("determining DiffID for manifest type %q is not yet supported", mt)
|
||||
}
|
||||
man, err := manifest.FromBlob(s.manifest, mt)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("parsing manifest: %w", err)
|
||||
// Typically, we expect untrustedDiffIDValues to be set by the generic copy code
|
||||
// via NoteOriginalOCIConfig; this is a compatibility fallback for external callers
|
||||
// of the public types.ImageDestination.
|
||||
if s.manifest == nil {
|
||||
return "", errUntrustedLayerDiffIDNotYetAvailable
|
||||
}
|
||||
|
||||
cb, err := s.getConfigBlob(man.ConfigInfo())
|
||||
ctx := context.Background() // This is all happening in memory, no need to worry about cancellation.
|
||||
unparsed := image.UnparsedInstance(newUncommittedImageSource(s), nil)
|
||||
sourced, err := image.FromUnparsedImage(ctx, nil, unparsed)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return "", fmt.Errorf("parsing image to be committed: %w", err)
|
||||
}
|
||||
configOCI, err := sourced.OCIConfig(ctx)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("obtaining config of image to be committed: %w", err)
|
||||
}
|
||||
|
||||
// retrieve the expected uncompressed digest from the config blob.
|
||||
configOCI := &imgspecv1.Image{}
|
||||
if err := json.Unmarshal(cb, configOCI); err != nil {
|
||||
return "", err
|
||||
}
|
||||
s.untrustedDiffIDValues = slices.Clone(configOCI.RootFS.DiffIDs)
|
||||
if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory…
|
||||
s.untrustedDiffIDValues = []digest.Digest{}
|
||||
s.setUntrustedDiffIDValuesFromOCIConfig(configOCI)
|
||||
}
|
||||
|
||||
// Let entirely empty / missing diffIDs through; but if the array does exist, expect it to contain an entry for every layer,
|
||||
// and fail hard on missing entries. This tries to account for completely naive image producers who just don’t fill DiffID,
|
||||
// while still detecting incorrectly-built / confused images.
|
||||
//
|
||||
// schema1 images don’t have DiffID values in the config.
|
||||
// Our schema1.OCIConfig code produces non-empty DiffID arrays of empty values, so treat arrays of all-empty
|
||||
// values as “DiffID unknown”.
|
||||
// For schema 1, it is important to exit here, before the layerIndex >= len(s.untrustedDiffIDValues)
|
||||
// check, because the format conversion from schema1 to OCI used to compute untrustedDiffIDValues
|
||||
// changes the number of layres (drops items with Schema1V1Compatibility.ThrowAway).
|
||||
if !slices.ContainsFunc(s.untrustedDiffIDValues, func(d digest.Digest) bool {
|
||||
return d != ""
|
||||
}) {
|
||||
return "", untrustedLayerDiffIDUnknownError{
|
||||
layerIndex: layerIndex,
|
||||
}
|
||||
}
|
||||
if layerIndex >= len(s.untrustedDiffIDValues) {
|
||||
@@ -1124,6 +1387,15 @@ func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.D
|
||||
return s.untrustedDiffIDValues[layerIndex], nil
|
||||
}
|
||||
|
||||
// setUntrustedDiffIDValuesFromOCIConfig updates s.untrustedDiffIDvalues from config.
|
||||
// The caller must ensure s.lock does not need to be held.
|
||||
func (s *storageImageDestination) setUntrustedDiffIDValuesFromOCIConfig(config *imgspecv1.Image) {
|
||||
s.untrustedDiffIDValues = slices.Clone(config.RootFS.DiffIDs)
|
||||
if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory…
|
||||
s.untrustedDiffIDValues = []digest.Digest{}
|
||||
}
|
||||
}
|
||||
|
||||
// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
|
||||
// WARNING: This does not have any transactional semantics:
|
||||
// - Uploaded data MAY be visible to others before CommitWithOptions() is called
|
||||
@@ -1131,7 +1403,7 @@ func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.D
|
||||
func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
|
||||
// This function is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock.
|
||||
|
||||
if len(s.manifest) == 0 {
|
||||
if s.manifest == nil {
|
||||
return errors.New("Internal error: storageImageDestination.CommitWithOptions() called without PutManifest()")
|
||||
}
|
||||
toplevelManifest, _, err := options.UnparsedToplevel.Manifest(ctx)
|
||||
@@ -1159,7 +1431,7 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
|
||||
}
|
||||
}
|
||||
// Find the list of layer blobs.
|
||||
man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
|
||||
man, err := manifest.FromBlob(s.manifest, s.manifestMIMEType)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing manifest: %w", err)
|
||||
}
|
||||
@@ -1193,29 +1465,21 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
|
||||
imgOptions.CreationDate = *inspect.Created
|
||||
}
|
||||
|
||||
// Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so
|
||||
// we just need to screen out the ones that are actually layers to get the list of non-layers.
|
||||
dataBlobs := set.New[digest.Digest]()
|
||||
for blob := range s.lockProtected.filenames {
|
||||
dataBlobs.Add(blob)
|
||||
}
|
||||
for _, layerBlob := range layerBlobs {
|
||||
dataBlobs.Delete(layerBlob.Digest)
|
||||
}
|
||||
for _, blob := range dataBlobs.Values() {
|
||||
v, err := os.ReadFile(s.lockProtected.filenames[blob])
|
||||
// Set up to save the config as a data item. Since we only share layers, the config should be in a file.
|
||||
if s.lockProtected.configDigest != "" {
|
||||
v, err := os.ReadFile(s.lockProtected.filenames[s.lockProtected.configDigest])
|
||||
if err != nil {
|
||||
return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
|
||||
return fmt.Errorf("copying config blob %q to image: %w", s.lockProtected.configDigest, err)
|
||||
}
|
||||
imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
|
||||
Key: blob.String(),
|
||||
Key: s.lockProtected.configDigest.String(),
|
||||
Data: v,
|
||||
Digest: digest.Canonical.FromBytes(v),
|
||||
})
|
||||
}
|
||||
// Set up to save the options.UnparsedToplevel's manifest if it differs from
|
||||
// the per-platform one, which is saved below.
|
||||
if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) {
|
||||
if !bytes.Equal(toplevelManifest, s.manifest) {
|
||||
manifestDigest, err := manifest.Digest(toplevelManifest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("digesting top-level manifest: %w", err)
|
||||
@@ -1370,6 +1634,10 @@ func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob
|
||||
return err
|
||||
}
|
||||
s.manifest = bytes.Clone(manifestBlob)
|
||||
if s.manifest == nil { // Make sure PutManifest can never succeed with s.manifest == nil
|
||||
s.manifest = []byte{}
|
||||
}
|
||||
s.manifestMIMEType = manifest.GuessMIMEType(s.manifest)
|
||||
s.manifestDigest = digest
|
||||
return nil
|
||||
}
|
||||
@@ -1392,7 +1660,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s
|
||||
if instanceDigest == nil {
|
||||
s.signatures = sigblob
|
||||
s.metadata.SignatureSizes = sizes
|
||||
if len(s.manifest) > 0 {
|
||||
if s.manifest != nil {
|
||||
manifestDigest := s.manifestDigest
|
||||
instanceDigest = &manifestDigest
|
||||
}
|
||||
|
||||
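Aside on the new layerID helper above: it derives storage “chain” IDs by folding each layer’s identity into a hash of the parent chain. A minimal standalone sketch of the same scheme, assuming only github.com/opencontainers/go-digest (chainID here is an illustrative reimplementation, not the c/image function):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// chainID mirrors the scheme above: a TOC-identified layer would use an
// "@TOC="-prefixed component (never a valid digest encoding, so it cannot
// collide with a DiffID), and such components must be re-hashed into a
// digest-shaped string before use.
func chainID(parentID, component string, mustHash bool) string {
	if parentID == "" && !mustHash {
		return component
	}
	return digest.Canonical.FromString(parentID + "+" + component).Encoded()
}

func main() {
	base := chainID("", digest.FromString("layer0").Encoded(), false)
	child := chainID(base, digest.FromString("layer1").Encoded(), false)
	fmt.Println(base, child)
}

Because the parent ID is hashed into each step, two stacks that share a prefix of layers share IDs only up to the first divergence.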
4  vendor/github.com/containers/image/v5/storage/storage_reference.go  generated vendored

@@ -153,7 +153,9 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag
	}
	if s.id == "" {
		logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport())
		return nil, fmt.Errorf("reference %q does not resolve to an image ID: %w", s.StringWithinTransport(), ErrNoSuchImage)
		// %.0w makes the error visible to errors.Unwrap() without including any text.
		// ErrNoSuchImage ultimately is “identifier is not an image”, which is not helpful for identifying the root cause.
		return nil, fmt.Errorf("reference %q does not resolve to an image ID%.0w", s.StringWithinTransport(), ErrNoSuchImage)
	}
	if loadedImage == nil {
		img, err := s.transport.store.Image(s.id)
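A note on the %.0w format used above: a precision of 0 prints nothing for the wrapped error, but fmt.Errorf still records it for errors.Is / errors.Unwrap. A self-contained illustration (not skopeo code):

package main

import (
	"errors"
	"fmt"
)

var errNoSuchImage = errors.New("identifier is not an image")

func main() {
	err := fmt.Errorf("reference %q does not resolve to an image ID%.0w", "example", errNoSuchImage)
	fmt.Println(err)                            // wrapped text is not appended
	fmt.Println(errors.Is(err, errNoSuchImage)) // true: still unwrappable
}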
22  vendor/github.com/containers/image/v5/storage/storage_src.go  generated vendored

@@ -35,13 +35,14 @@ type storageImageSource struct {
	impl.PropertyMethodsInitialize
	stubs.NoGetBlobAtInitialize

	imageRef storageReference
	image *storage.Image
	systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
	metadata storageImageMetadata
	cachedManifest []byte // A cached copy of the manifest, if already known, or nil
	getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
	getBlobMutexProtected getBlobMutexProtected
	imageRef storageReference
	image *storage.Image
	systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
	metadata storageImageMetadata
	cachedManifest []byte // A cached copy of the manifest, if already known, or nil
	cachedManifestMIMEType string // Valid if cachedManifest != nil
	getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
	getBlobMutexProtected getBlobMutexProtected
}

// getBlobMutexProtected contains storageImageSource data protected by getBlobMutex.
@@ -247,7 +248,7 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
		}
		return blob, manifest.GuessMIMEType(blob), err
	}
	if len(s.cachedManifest) == 0 {
	if s.cachedManifest == nil {
		// The manifest is stored as a big data item.
		// Prefer the manifest corresponding to the user-specified digest, if available.
		if s.imageRef.named != nil {
@@ -267,15 +268,16 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
		}
		// If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
		// Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
		if len(s.cachedManifest) == 0 {
		if s.cachedManifest == nil {
			cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
			if err != nil {
				return nil, "", err
			}
			s.cachedManifest = cachedBlob
		}
		s.cachedManifestMIMEType = manifest.GuessMIMEType(s.cachedManifest)
	}
	return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
	return s.cachedManifest, s.cachedManifestMIMEType, err
}

// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
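The storage_src.go hunks switch the “cache not filled yet” test from a length check to a nil check (so an empty-but-loaded manifest is representable) and compute the manifest MIME type once, when the cache is filled, instead of on every GetManifest call. A hedged sketch of the pattern with simplified types (manifestCache and its callbacks are illustrative, not the real storageImageSource):

package main

import "fmt"

// manifestCache caches bytes together with a value derived from them, so the
// derived value is computed exactly once and stays consistent with the bytes.
type manifestCache struct {
	blob     []byte // nil means "not loaded"; a loaded empty slice is distinct
	mimeType string // valid only if blob != nil
}

func (c *manifestCache) get(load func() []byte, guessMIME func([]byte) string) ([]byte, string) {
	if c.blob == nil {
		c.blob = load()
		c.mimeType = guessMIME(c.blob) // computed once, at fill time
	}
	return c.blob, c.mimeType
}

func main() {
	c := &manifestCache{}
	blob, mt := c.get(
		func() []byte { return []byte(`{"schemaVersion": 2}`) },
		func(b []byte) string { return "application/vnd.oci.image.manifest.v1+json" }, // stand-in for manifest.GuessMIMEType
	)
	fmt.Println(len(blob), mt)
}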
2  vendor/github.com/containers/image/v5/version/version.go  generated vendored

@@ -6,7 +6,7 @@ const (
	// VersionMajor is for an API incompatible changes
	VersionMajor = 5
	// VersionMinor is for functionality in a backwards-compatible manner
	VersionMinor = 33
	VersionMinor = 34
	// VersionPatch is for backwards-compatible bug fixes
	VersionPatch = 0
2  vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go  generated vendored

@@ -25,7 +25,7 @@ import (
	"github.com/containers/ocicrypt/config"
	"github.com/containers/ocicrypt/keywrap"
	"github.com/containers/ocicrypt/utils"
	"go.mozilla.org/pkcs7"
	"github.com/smallstep/pkcs7"
)

type pkcs7KeyWrapper struct {
4  vendor/github.com/containers/storage/.cirrus.yml  generated vendored

@@ -17,13 +17,13 @@ env:
    ####
    #### Cache-image names to test with (double-quotes around names are critical)
    ###
    FEDORA_NAME: "fedora-39"
    FEDORA_NAME: "fedora-41"
    DEBIAN_NAME: "debian-13"

    # GCE project where images live
    IMAGE_PROJECT: "libpod-218412"
    # VM Image built in containers/automation_images
    IMAGE_SUFFIX: "c20241010t105554z-f40f39d13"
    IMAGE_SUFFIX: "c20250107t132430z-f41f40d13"
    FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
    DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
2  vendor/github.com/containers/storage/Makefile  generated vendored

@@ -35,7 +35,7 @@ TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /de
# N/B: This value is managed by Renovate, manual changes are
# possible, as long as they don't disturb the formatting
# (i.e. DO NOT ADD A 'v' prefix!)
GOLANGCI_LINT_VERSION := 1.61.0
GOLANGCI_LINT_VERSION := 1.63.4

default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs
2  vendor/github.com/containers/storage/VERSION  generated vendored

@@ -1 +1 @@
1.56.0
1.57.1
8  vendor/github.com/containers/storage/check.go  generated vendored

@@ -80,7 +80,7 @@ type CheckOptions struct {
// layer to the contents that we'd expect it to have to ignore certain
// discrepancies
type checkIgnore struct {
	ownership, timestamps, permissions bool
	ownership, timestamps, permissions, filetype bool
}

// CheckMost returns a CheckOptions with mostly just "quick" checks enabled.
@@ -139,8 +139,10 @@ func (s *store) Check(options *CheckOptions) (CheckReport, error) {
		if strings.Contains(o, "ignore_chown_errors=true") {
			ignore.ownership = true
		}
		if strings.HasPrefix(o, "force_mask=") {
		if strings.Contains(o, "force_mask=") {
			ignore.ownership = true
			ignore.permissions = true
			ignore.filetype = true
		}
	}
	for o := range s.pullOptions {
@@ -833,7 +835,7 @@ func (s *store) Repair(report CheckReport, options *RepairOptions) []error {
// compareFileInfo returns a string summarizing what's different between the two checkFileInfos
func compareFileInfo(a, b checkFileInfo, idmap *idtools.IDMappings, ignore checkIgnore) string {
	var comparison []string
	if a.typeflag != b.typeflag {
	if a.typeflag != b.typeflag && !ignore.filetype {
		comparison = append(comparison, fmt.Sprintf("filetype:%v→%v", a.typeflag, b.typeflag))
	}
	if idmap != nil && !idmap.Empty() {
5  vendor/github.com/containers/storage/drivers/aufs/aufs.go  generated vendored

@@ -776,3 +776,8 @@ func (a *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
func (a *Driver) SupportsShifting() bool {
	return false
}

// Dedup performs deduplication of the driver's storage.
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
	return graphdriver.DedupResult{}, nil
}
5  vendor/github.com/containers/storage/drivers/btrfs/btrfs.go  generated vendored

@@ -673,3 +673,8 @@ func (d *Driver) ListLayers() ([]string, error) {
func (d *Driver) AdditionalImageStores() []string {
	return nil
}

// Dedup performs deduplication of the driver's storage.
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
	return graphdriver.DedupResult{}, nil
}
2  vendor/github.com/containers/storage/drivers/chown_darwin.go  generated vendored

@@ -83,7 +83,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
	}
	if uid != int(st.Uid) || gid != int(st.Gid) {
		capability, err := system.Lgetxattr(path, "security.capability")
		if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
		if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform {
			return fmt.Errorf("%s: %w", os.Args[0], err)
		}
2  vendor/github.com/containers/storage/drivers/chown_unix.go  generated vendored

@@ -101,7 +101,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
	}
	if uid != int(st.Uid) || gid != int(st.Gid) {
		cap, err := system.Lgetxattr(path, "security.capability")
		if err != nil && !errors.Is(err, system.EOPNOTSUPP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform {
		if err != nil && !errors.Is(err, system.ENOTSUP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform {
			return fmt.Errorf("%s: %w", os.Args[0], err)
		}
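Several hunks in this vendoring replace unix.EOPNOTSUPP / system.EOPNOTSUPP with a single system.ENOTSUP sentinel that callers test via errors.Is, so one portable constant covers platforms that spell the errno differently (on Linux the two are the same value). A minimal Linux sketch of the pattern, assuming golang.org/x/sys/unix (the ENOTSUP variable here is an illustrative stand-in for the vendored system package’s constant):

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sys/unix"
)

// One exported sentinel lets callers write errors.Is(err, ENOTSUP)
// without caring how the platform names the errno.
var ENOTSUP = unix.ENOTSUP

func getxattr(path, attr string) ([]byte, error) {
	sz, err := unix.Lgetxattr(path, attr, nil) // size query first
	if err != nil {
		return nil, fmt.Errorf("lgetxattr %s %s: %w", path, attr, err)
	}
	buf := make([]byte, sz)
	_, err = unix.Lgetxattr(path, attr, buf)
	return buf, err
}

func main() {
	_, err := getxattr("/tmp", "user.example")
	switch {
	case err == nil:
		fmt.Println("attribute present")
	case errors.Is(err, ENOTSUP):
		fmt.Println("filesystem has no xattr support; treated as non-fatal")
	default:
		fmt.Println("other error (e.g. attribute not set):", err)
	}
}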
4  vendor/github.com/containers/storage/drivers/copy/copy_linux.go  generated vendored

@@ -106,7 +106,7 @@ func legacyCopy(srcFile io.Reader, dstFile io.Writer) error {

func copyXattr(srcPath, dstPath, attr string) error {
	data, err := system.Lgetxattr(srcPath, attr)
	if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
	if err != nil && !errors.Is(err, system.ENOTSUP) {
		return err
	}
	if data != nil {
@@ -279,7 +279,7 @@ func doCopyXattrs(srcPath, dstPath string) error {
	}

	xattrs, err := system.Llistxattr(srcPath)
	if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
	if err != nil && !errors.Is(err, system.ENOTSUP) {
		return err
	}
24  vendor/github.com/containers/storage/drivers/driver.go  generated vendored

@@ -8,6 +8,7 @@ import (
	"path/filepath"
	"strings"

	"github.com/containers/storage/internal/dedup"
	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/directory"
	"github.com/containers/storage/pkg/fileutils"
@@ -81,6 +82,23 @@ type ApplyDiffWithDifferOpts struct {
	Flags map[string]interface{}
}

// DedupArgs contains the information to perform storage deduplication.
type DedupArgs struct {
	// Layers is the list of layers to deduplicate.
	Layers []string

	// Options that are passed directly to the pkg/dedup.DedupDirs function.
	Options dedup.DedupOptions
}

// DedupResult contains the result of the Dedup() call.
type DedupResult struct {
	// Deduped represents the total number of bytes saved by deduplication.
	// This value accounts also for all previously deduplicated data, not only the savings
	// from the last run.
	Deduped uint64
}

// InitFunc initializes the storage driver.
type InitFunc func(homedir string, options Options) (Driver, error)

@@ -139,6 +157,8 @@ type ProtoDriver interface {
	// AdditionalImageStores returns additional image stores supported by the driver
	// This API is experimental and can be changed without bumping the major version number.
	AdditionalImageStores() []string
	// Dedup performs deduplication of the driver's storage.
	Dedup(DedupArgs) (DedupResult, error)
}

// DiffDriver is the interface to use to implement graph diffs
@@ -211,8 +231,8 @@ const (
	// DifferOutputFormatDir means the output is a directory and it will
	// keep the original layout.
	DifferOutputFormatDir = iota
	// DifferOutputFormatFlat will store the files by their checksum, in the form
	// checksum[0:2]/checksum[2:]
	// DifferOutputFormatFlat will store the files by their checksum, per
	// pkg/chunked/internal/composefs.RegularFilePathForValidatedDigest.
	DifferOutputFormatFlat
)
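The Dedup plumbing spans most of the remaining hunks: ProtoDriver gains a Dedup method, overlay and vfs actually deduplicate, and aufs/btrfs/windows/zfs opt out by returning an empty result. A hedged sketch of the call shape (the noopDriver type is invented for illustration; only the simplified DedupArgs/DedupResult mirror the vendored types):

package main

import "fmt"

// Simplified mirrors of the vendored graphdriver types (Options omitted).
type DedupArgs struct{ Layers []string }
type DedupResult struct{ Deduped uint64 }

// Deduper is the slice of ProtoDriver relevant here.
type Deduper interface {
	Dedup(DedupArgs) (DedupResult, error)
}

// noopDriver stands in for drivers like zfs/btrfs that opt out by
// returning an empty result rather than an error.
type noopDriver struct{}

func (noopDriver) Dedup(DedupArgs) (DedupResult, error) { return DedupResult{}, nil }

func main() {
	var d Deduper = noopDriver{}
	res, err := d.Dedup(DedupArgs{Layers: []string{"layer1", "layer2"}})
	if err != nil {
		panic(err)
	}
	// Deduped is cumulative: it also counts previously deduplicated data.
	fmt.Printf("saved %d bytes\n", res.Deduped)
}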
3  vendor/github.com/containers/storage/drivers/overlay/check_116.go  generated vendored

@@ -10,7 +10,6 @@ import (

	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/system"
	"golang.org/x/sys/unix"
)

func scanForMountProgramIndicators(home string) (detected bool, err error) {
@@ -28,7 +27,7 @@ func scanForMountProgramIndicators(home string) (detected bool, err error) {
	}
	if d.IsDir() {
		xattrs, err := system.Llistxattr(path)
		if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
		if err != nil && !errors.Is(err, system.ENOTSUP) {
			return err
		}
		for _, xattr := range xattrs {
4  vendor/github.com/containers/storage/drivers/overlay/composefs.go  generated vendored

@@ -1,4 +1,4 @@
//go:build linux && cgo
//go:build linux

package overlay

@@ -27,7 +27,7 @@ var (
	composeFsHelperErr error

	// skipMountViaFile is used to avoid trying to mount EROFS directly via the file if we already know the current kernel
	// does not support it. Mounting directly via a file will be supported in kernel 6.12.
	// does not support it. Mounting directly via a file is supported from Linux 6.12.
	skipMountViaFile atomic.Bool
)
21  vendor/github.com/containers/storage/drivers/overlay/overlay.go  generated vendored

@@ -22,6 +22,7 @@ import (
	graphdriver "github.com/containers/storage/drivers"
	"github.com/containers/storage/drivers/overlayutils"
	"github.com/containers/storage/drivers/quota"
	"github.com/containers/storage/internal/dedup"
	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/chrootarchive"
	"github.com/containers/storage/pkg/directory"
@@ -1096,6 +1097,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl
	}

	if d.options.forceMask != nil {
		st.Mode |= os.ModeDir
		if err := idtools.SetContainersOverrideXattr(diff, st); err != nil {
			return err
		}
@@ -2740,3 +2742,22 @@ func getMappedMountRoot(path string) string {
	}
	return dirName
}

// Dedup performs deduplication of the driver's storage.
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
	var dirs []string
	for _, layer := range req.Layers {
		dir, _, inAdditionalStore := d.dir2(layer, false)
		if inAdditionalStore {
			continue
		}
		if err := fileutils.Exists(dir); err == nil {
			dirs = append(dirs, filepath.Join(dir, "diff"))
		}
	}
	r, err := dedup.DedupDirs(dirs, req.Options)
	if err != nil {
		return graphdriver.DedupResult{}, err
	}
	return graphdriver.DedupResult{Deduped: r.Deduped}, nil
}
23  vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go  generated vendored

@@ -1,23 +0,0 @@
//go:build linux && !cgo

package overlay

import (
	"fmt"
)

func openComposefsMount(dataDir string) (int, error) {
	return 0, fmt.Errorf("composefs not supported on this build")
}

func getComposeFsHelper() (string, error) {
	return "", fmt.Errorf("composefs not supported on this build")
}

func mountComposefsBlob(dataDir, mountPoint string) error {
	return fmt.Errorf("composefs not supported on this build")
}

func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error {
	return fmt.Errorf("composefs not supported on this build")
}
2  vendor/github.com/containers/storage/drivers/register/register_overlay.go  generated vendored

@@ -1,4 +1,4 @@
//go:build !exclude_graphdriver_overlay && linux && cgo
//go:build !exclude_graphdriver_overlay && linux

package register
17  vendor/github.com/containers/storage/drivers/vfs/driver.go  generated vendored

@@ -10,6 +10,7 @@ import (
	"strings"

	graphdriver "github.com/containers/storage/drivers"
	"github.com/containers/storage/internal/dedup"
	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/directory"
	"github.com/containers/storage/pkg/fileutils"
@@ -348,3 +349,19 @@ func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string,
func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) {
	return d.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel)
}

// Dedup performs deduplication of the driver's storage.
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
	var dirs []string
	for _, layer := range req.Layers {
		dir := d.dir2(layer, false)
		if err := fileutils.Exists(dir); err == nil {
			dirs = append(dirs, dir)
		}
	}
	r, err := dedup.DedupDirs(dirs, req.Options)
	if err != nil {
		return graphdriver.DedupResult{}, err
	}
	return graphdriver.DedupResult{Deduped: r.Deduped}, nil
}
5  vendor/github.com/containers/storage/drivers/windows/windows.go  generated vendored

@@ -975,6 +975,11 @@ func (d *Driver) AdditionalImageStores() []string {
	return nil
}

// Dedup performs deduplication of the driver's storage.
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
	return graphdriver.DedupResult{}, nil
}

// UpdateLayerIDMap changes ownerships in the layer's filesystem tree from
// matching those in toContainer to matching those in toHost.
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
5  vendor/github.com/containers/storage/drivers/zfs/zfs.go  generated vendored

@@ -511,3 +511,8 @@ func (d *Driver) ListLayers() ([]string, error) {
func (d *Driver) AdditionalImageStores() []string {
	return nil
}

// Dedup performs deduplication of the driver's storage.
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
	return graphdriver.DedupResult{}, nil
}
163  vendor/github.com/containers/storage/internal/dedup/dedup.go  generated vendored  Normal file

@@ -0,0 +1,163 @@
package dedup

import (
	"crypto/sha256"
	"encoding/binary"
	"errors"
	"fmt"
	"hash/crc64"
	"io/fs"
	"sync"

	"github.com/opencontainers/selinux/pkg/pwalkdir"
	"github.com/sirupsen/logrus"
)

var notSupported = errors.New("reflinks are not supported on this platform")

const (
	DedupHashInvalid DedupHashMethod = iota
	DedupHashCRC
	DedupHashFileSize
	DedupHashSHA256
)

type DedupHashMethod int

type DedupOptions struct {
	// HashMethod is the hash function to use to find identical files
	HashMethod DedupHashMethod
}

type DedupResult struct {
	// Deduped represents the total number of bytes saved by deduplication.
	// This value accounts also for all previously deduplicated data, not only the savings
	// from the last run.
	Deduped uint64
}

func getFileChecksum(hashMethod DedupHashMethod, path string, info fs.FileInfo) (string, error) {
	switch hashMethod {
	case DedupHashInvalid:
		return "", fmt.Errorf("invalid hash method: %v", hashMethod)
	case DedupHashFileSize:
		return fmt.Sprintf("%v", info.Size()), nil
	case DedupHashSHA256:
		return readAllFile(path, info, func(buf []byte) (string, error) {
			h := sha256.New()
			if _, err := h.Write(buf); err != nil {
				return "", err
			}
			return string(h.Sum(nil)), nil
		})
	case DedupHashCRC:
		return readAllFile(path, info, func(buf []byte) (string, error) {
			c := crc64.New(crc64.MakeTable(crc64.ECMA))
			if _, err := c.Write(buf); err != nil {
				return "", err
			}
			bufRet := make([]byte, 8)
			binary.BigEndian.PutUint64(bufRet, c.Sum64())
			return string(bufRet), nil
		})
	default:
		return "", fmt.Errorf("unknown hash method: %v", hashMethod)
	}
}

type pathsLocked struct {
	paths []string
	lock  sync.Mutex
}

func DedupDirs(dirs []string, options DedupOptions) (DedupResult, error) {
	res := DedupResult{}
	hashToPaths := make(map[string]*pathsLocked)
	lock := sync.Mutex{} // protects `hashToPaths` and `res`

	dedup, err := newDedupFiles()
	if err != nil {
		return res, err
	}

	for _, dir := range dirs {
		logrus.Debugf("Deduping directory %s", dir)
		if err := pwalkdir.Walk(dir, func(path string, d fs.DirEntry, err error) error {
			if err != nil {
				return err
			}
			if !d.Type().IsRegular() {
				return nil
			}
			info, err := d.Info()
			if err != nil {
				return err
			}
			size := uint64(info.Size())
			if size == 0 {
				// do not bother with empty files
				return nil
			}

			// the file was already deduplicated
			if visited, err := dedup.isFirstVisitOf(info); err != nil {
				return err
			} else if visited {
				return nil
			}

			h, err := getFileChecksum(options.HashMethod, path, info)
			if err != nil {
				return err
			}

			lock.Lock()
			item, foundItem := hashToPaths[h]
			if !foundItem {
				item = &pathsLocked{paths: []string{path}}
				hashToPaths[h] = item
				lock.Unlock()
				return nil
			}
			item.lock.Lock()
			lock.Unlock()

			dedupBytes, err := func() (uint64, error) { // function to have a scope for the defer statement
				defer item.lock.Unlock()

				var dedupBytes uint64
				for _, src := range item.paths {
					deduped, err := dedup.dedup(src, path, info)
					if err == nil && deduped > 0 {
						logrus.Debugf("Deduped %q -> %q (%d bytes)", src, path, deduped)
						dedupBytes += deduped
						break
					}
					logrus.Debugf("Failed to deduplicate: %v", err)
					if errors.Is(err, notSupported) {
						return dedupBytes, err
					}
				}
				if dedupBytes == 0 {
					item.paths = append(item.paths, path)
				}
				return dedupBytes, nil
			}()
			if err != nil {
				return err
			}

			lock.Lock()
			res.Deduped += dedupBytes
			lock.Unlock()
			return nil
		}); err != nil {
			// if reflinks are not supported, return immediately without errors
			if errors.Is(err, notSupported) {
				return res, nil
			}
			return res, err
		}
	}
	return res, nil
}
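A hedged usage sketch for DedupDirs above. The real package is internal to github.com/containers/storage and cannot be imported from outside, so the import path below is hypothetical; the call shape matches the API as vendored:

package main

import (
	"fmt"

	// Hypothetical import path: the real package is internal to
	// github.com/containers/storage and not importable from outside.
	"example.com/dedup"
)

func main() {
	// SHA256 is the safest grouping key: CRC or file size alone can collide,
	// but the kernel re-verifies contents before sharing extents, so a weaker
	// hash only risks wasted work, not corruption.
	res, err := dedup.DedupDirs(
		[]string{"/var/lib/containers/storage/overlay/abc/diff"}, // placeholder path
		dedup.DedupOptions{HashMethod: dedup.DedupHashSHA256},
	)
	if err != nil {
		fmt.Println("dedup failed:", err)
		return
	}
	fmt.Printf("total deduplicated bytes: %d\n", res.Deduped)
}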
139  vendor/github.com/containers/storage/internal/dedup/dedup_linux.go  generated vendored  Normal file

@@ -0,0 +1,139 @@
package dedup

import (
	"errors"
	"fmt"
	"io"
	"io/fs"
	"os"
	"sync"
	"syscall"

	"golang.org/x/sys/unix"
)

type deviceInodePair struct {
	dev uint64
	ino uint64
}

type dedupFiles struct {
	lock          sync.Mutex
	visitedInodes map[deviceInodePair]struct{}
}

func newDedupFiles() (*dedupFiles, error) {
	return &dedupFiles{
		visitedInodes: make(map[deviceInodePair]struct{}),
	}, nil
}

func (d *dedupFiles) recordInode(dev, ino uint64) (bool, error) {
	d.lock.Lock()
	defer d.lock.Unlock()

	di := deviceInodePair{
		dev: dev,
		ino: ino,
	}

	_, visited := d.visitedInodes[di]
	d.visitedInodes[di] = struct{}{}
	return visited, nil
}

// isFirstVisitOf records that the file is being processed. Returns true if the file was already visited.
func (d *dedupFiles) isFirstVisitOf(fi fs.FileInfo) (bool, error) {
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return false, fmt.Errorf("unable to get raw syscall.Stat_t data")
	}
	return d.recordInode(uint64(st.Dev), st.Ino)
}

// dedup deduplicates the file at src path to dst path
func (d *dedupFiles) dedup(src, dst string, fiDst fs.FileInfo) (uint64, error) {
	srcFile, err := os.OpenFile(src, os.O_RDONLY, 0)
	if err != nil {
		return 0, fmt.Errorf("failed to open source file: %w", err)
	}
	defer srcFile.Close()

	dstFile, err := os.OpenFile(dst, os.O_WRONLY, 0)
	if err != nil {
		return 0, fmt.Errorf("failed to open destination file: %w", err)
	}
	defer dstFile.Close()

	stSrc, err := srcFile.Stat()
	if err != nil {
		return 0, fmt.Errorf("failed to stat source file: %w", err)
	}
	sSrc, ok := stSrc.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, fmt.Errorf("unable to get raw syscall.Stat_t data")
	}
	sDest, ok := fiDst.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, fmt.Errorf("unable to get raw syscall.Stat_t data")
	}
	if sSrc.Dev == sDest.Dev && sSrc.Ino == sDest.Ino {
		// same inode, we are dealing with a hard link, no need to deduplicate
		return 0, nil
	}

	value := unix.FileDedupeRange{
		Src_offset: 0,
		Src_length: uint64(stSrc.Size()),
		Info: []unix.FileDedupeRangeInfo{
			{
				Dest_fd:     int64(dstFile.Fd()),
				Dest_offset: 0,
			},
		},
	}
	err = unix.IoctlFileDedupeRange(int(srcFile.Fd()), &value)
	if err == nil {
		return uint64(value.Info[0].Bytes_deduped), nil
	}

	if errors.Is(err, unix.ENOTSUP) {
		return 0, notSupported
	}
	return 0, fmt.Errorf("failed to clone file %q: %w", src, err)
}

func readAllFile(path string, info fs.FileInfo, fn func([]byte) (string, error)) (string, error) {
	size := info.Size()
	if size == 0 {
		return fn(nil)
	}

	file, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer file.Close()

	if size < 4096 {
		// small file, read it all
		data := make([]byte, size)
		_, err = io.ReadFull(file, data)
		if err != nil {
			return "", err
		}
		return fn(data)
	}

	mmap, err := unix.Mmap(int(file.Fd()), 0, int(size), unix.PROT_READ, unix.MAP_PRIVATE)
	if err != nil {
		return "", fmt.Errorf("failed to mmap file: %w", err)
	}
	defer func() {
		_ = unix.Munmap(mmap)
	}()

	_ = unix.Madvise(mmap, unix.MADV_SEQUENTIAL)

	return fn(mmap)
}
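dedup_linux.go above is built on the FIDEDUPERANGE ioctl (unix.IoctlFileDedupeRange): the kernel compares the source and destination ranges and shares extents only when the bytes are identical, which is why a weak checksum in DedupDirs risks wasted work but not corruption. A minimal standalone sketch (Linux only; the paths are placeholders and must live on a reflink-capable filesystem such as btrfs or XFS):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	src, err := os.Open("/data/a") // placeholder paths
	if err != nil {
		panic(err)
	}
	defer src.Close()
	dst, err := os.OpenFile("/data/b", os.O_WRONLY, 0)
	if err != nil {
		panic(err)
	}
	defer dst.Close()

	st, err := src.Stat()
	if err != nil {
		panic(err)
	}
	req := unix.FileDedupeRange{
		Src_length: uint64(st.Size()),
		Info:       []unix.FileDedupeRangeInfo{{Dest_fd: int64(dst.Fd())}},
	}
	if err := unix.IoctlFileDedupeRange(int(src.Fd()), &req); err != nil {
		panic(err) // unix.ENOTSUP: the filesystem cannot reflink
	}
	if req.Info[0].Status == unix.FILE_DEDUPE_RANGE_DIFFERS {
		fmt.Println("contents differ; nothing shared")
		return
	}
	fmt.Printf("deduplicated %d bytes\n", req.Info[0].Bytes_deduped)
}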
27  vendor/github.com/containers/storage/internal/dedup/dedup_unsupported.go  generated vendored  Normal file

@@ -0,0 +1,27 @@
//go:build !linux

package dedup

import (
	"io/fs"
)

type dedupFiles struct{}

func newDedupFiles() (*dedupFiles, error) {
	return nil, notSupported
}

// isFirstVisitOf records that the file is being processed. Returns true if the file was already visited.
func (d *dedupFiles) isFirstVisitOf(fi fs.FileInfo) (bool, error) {
	return false, notSupported
}

// dedup deduplicates the file at src path to dst path
func (d *dedupFiles) dedup(src, dst string, fiDst fs.FileInfo) (uint64, error) {
	return 0, notSupported
}

func readAllFile(path string, info fs.FileInfo, fn func([]byte) (string, error)) (string, error) {
	return "", notSupported
}
37  vendor/github.com/containers/storage/layers.go  generated vendored

@@ -336,6 +336,9 @@ type rwLayerStore interface {

	// Clean up unreferenced layers
	GarbageCollect() error

	// Dedup deduplicates layers in the store.
	dedup(drivers.DedupArgs) (drivers.DedupResult, error)
}

type multipleLockFile struct {
@@ -913,23 +916,32 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
	// user of this storage area marked for deletion but didn't manage to
	// actually delete.
	var incompleteDeletionErrors error // = nil
	var layersToDelete []*Layer
	for _, layer := range r.layers {
		if layer.Flags == nil {
			layer.Flags = make(map[string]interface{})
		}
		if layerHasIncompleteFlag(layer) {
			logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
			err := r.deleteInternal(layer.ID)
			if err != nil {
				// Don't return the error immediately, because deleteInternal does not saveLayers();
				// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
				// deleted incomplete layers have their metadata correctly removed.
				incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
					fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
			}
			modifiedLocations |= layerLocation(layer)
			// Important: Do not call r.deleteInternal() here. It modifies r.layers
			// which causes unexpected side effects while iterating over r.layers here.
			// The range loop has no idea that the underlying elements were shifted
			// around.
			layersToDelete = append(layersToDelete, layer)
		}
	}
	// Now actually delete the layers
	for _, layer := range layersToDelete {
		logrus.Warnf("Found incomplete layer %q, deleting it", layer.ID)
		err := r.deleteInternal(layer.ID)
		if err != nil {
			// Don't return the error immediately, because deleteInternal does not saveLayers();
			// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
			// deleted incomplete layers have their metadata correctly removed.
			incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
				fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
		}
		modifiedLocations |= layerLocation(layer)
	}
	if err := r.saveLayers(modifiedLocations); err != nil {
		return false, err
	}
@@ -2592,6 +2604,11 @@ func (r *layerStore) LayersByTOCDigest(d digest.Digest) ([]Layer, error) {
	return r.layersByDigestMap(r.bytocsum, d)
}

// Requires startWriting.
func (r *layerStore) dedup(req drivers.DedupArgs) (drivers.DedupResult, error) {
	return r.driver.Dedup(req)
}

func closeAll(closes ...func() error) (rErr error) {
	for _, f := range closes {
		if err := f(); err != nil {
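The layers.go fix above is the classic collect-then-mutate pattern: r.deleteInternal modifies r.layers while a range loop is iterating over it, so deletions are now gathered first and applied after the loop. A distilled illustration with toy types (not c/storage code):

package main

import "fmt"

type layer struct {
	id         string
	incomplete bool
}

func main() {
	layers := []layer{{"a", true}, {"b", true}, {"c", false}}

	// Deleting from the slice inside the range loop would shift elements
	// under the iterator and skip the neighbors of deleted items.
	// Instead: collect first...
	var toDelete []string
	for _, l := range layers {
		if l.incomplete {
			toDelete = append(toDelete, l.id)
		}
	}
	// ...then mutate in a separate pass.
	for _, id := range toDelete {
		for i, l := range layers {
			if l.id == id {
				layers = append(layers[:i], layers[i+1:]...)
				break
			}
		}
	}
	fmt.Println(layers) // [{c false}]
}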
55
vendor/github.com/containers/storage/pkg/archive/archive.go
generated
vendored
55
vendor/github.com/containers/storage/pkg/archive/archive.go
generated
vendored
@@ -78,6 +78,7 @@ const (
 	windows = "windows"
 	darwin  = "darwin"
 	freebsd = "freebsd"
+	linux   = "linux"
 )
 
 var xattrsToIgnore = map[string]interface{}{
@@ -427,7 +428,7 @@ func readSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
 	}
 	for _, xattr := range []string{"security.capability", "security.ima"} {
 		capability, err := system.Lgetxattr(path, xattr)
-		if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
+		if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform {
 			return fmt.Errorf("failed to read %q attribute from %q: %w", xattr, path, err)
 		}
 		if capability != nil {
@@ -440,7 +441,7 @@ func readSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
 // readUserXattrToTarHeader reads user.* xattr from filesystem to a tar header
 func readUserXattrToTarHeader(path string, hdr *tar.Header) error {
 	xattrs, err := system.Llistxattr(path)
-	if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
+	if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform {
 		return err
 	}
 	for _, key := range xattrs {
@@ -655,12 +656,20 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
 	hdrInfo := hdr.FileInfo()
 
+	typeFlag := hdr.Typeflag
 	mask := hdrInfo.Mode()
+
+	// update also the implementation of ForceMask in pkg/chunked
 	if forceMask != nil {
 		mask = *forceMask
+		// If we have a forceMask, force the real type to either be a directory,
+		// a link, or a regular file.
+		if typeFlag != tar.TypeDir && typeFlag != tar.TypeSymlink && typeFlag != tar.TypeLink {
+			typeFlag = tar.TypeReg
+		}
 	}
 
-	switch hdr.Typeflag {
+	switch typeFlag {
 	case tar.TypeDir:
 		// Create directory unless it exists as a directory already.
 		// In that case we just want to merge the two
@@ -728,16 +737,6 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 		return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
 	}
 
-	if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") {
-		value := idtools.Stat{
-			IDs:  idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid},
-			Mode: hdrInfo.Mode() & 0o7777,
-		}
-		if err := idtools.SetContainersOverrideXattr(path, value); err != nil {
-			return err
-		}
-	}
-
 	// Lchown is not supported on Windows.
 	if Lchown && runtime.GOOS != windows {
 		if chownOpts == nil {
@@ -793,18 +792,30 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 			continue
 		}
 		if err := system.Lsetxattr(path, xattrKey, []byte(value), 0); err != nil {
-			if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) {
-				// We ignore errors here because not all graphdrivers support
-				// xattrs *cough* old versions of AUFS *cough*. However only
-				// ENOTSUP should be emitted in that case, otherwise we still
-				// bail. We also ignore EPERM errors if we are running in a
-				// user namespace.
+			if errors.Is(err, system.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) {
+				// Ignore specific error cases:
+				// - ENOTSUP: Expected for graphdrivers lacking extended attribute support:
+				//   - Legacy AUFS versions
+				//   - FreeBSD with unsupported namespaces (trusted, security)
+				// - EPERM: Expected when operating within a user namespace
+				// All other errors will cause a failure.
 				errs = append(errs, err.Error())
 				continue
 			}
 			return err
 		}
 	}
 
+	if forceMask != nil && (typeFlag == tar.TypeReg || typeFlag == tar.TypeDir || runtime.GOOS == "darwin") {
+		value := idtools.Stat{
+			IDs:   idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid},
+			Mode:  hdrInfo.Mode(),
+			Major: int(hdr.Devmajor),
+			Minor: int(hdr.Devminor),
+		}
+		if err := idtools.SetContainersOverrideXattr(path, value); err != nil {
+			return err
+		}
+	}
 
 	// We defer setting flags on directories until the end of
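The hunk above widens which Lsetxattr failures are tolerated during extraction. A minimal standalone sketch of the same tolerance policy, using golang.org/x/sys/unix directly rather than the vendored system wrapper (function name and signature are mine):

```go
package main

import (
	"errors"
	"fmt"

	"golang.org/x/sys/unix"
)

// setxattrBestEffort applies one xattr and, like the code above, treats
// ENOTSUP (filesystem without xattr support) and, inside a user namespace,
// EPERM as non-fatal; every other error aborts the extraction.
func setxattrBestEffort(path, key string, value []byte, inUserns bool) (skipped bool, err error) {
	err = unix.Lsetxattr(path, key, value, 0)
	switch {
	case err == nil:
		return false, nil
	case errors.Is(err, unix.ENOTSUP), inUserns && errors.Is(err, unix.EPERM):
		return true, nil // recorded by the caller, extraction continues
	default:
		return false, fmt.Errorf("setting xattr %q on %q: %w", key, path, err)
	}
}

func main() {
	skipped, err := setxattrBestEffort("/tmp/example", "user.demo", []byte("1"), false)
	fmt.Println(skipped, err)
}
```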
@@ -1149,11 +1160,11 @@ loop:
 	}
 
 	if options.ForceMask != nil {
-		value := idtools.Stat{Mode: 0o755}
+		value := idtools.Stat{Mode: os.ModeDir | os.FileMode(0o755)}
 		if rootHdr != nil {
 			value.IDs.UID = rootHdr.Uid
 			value.IDs.GID = rootHdr.Gid
-			value.Mode = os.FileMode(rootHdr.Mode)
+			value.Mode = os.ModeDir | os.FileMode(rootHdr.Mode)
 		}
 		if err := idtools.SetContainersOverrideXattr(dest, value); err != nil {
 			return err
@@ -1379,7 +1390,7 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
 	uid, gid = hdr.Uid, hdr.Gid
 	if xstat, ok := hdr.PAXRecords[PaxSchilyXattr+idtools.ContainersOverrideXattr]; ok {
 		attrs := strings.Split(string(xstat), ":")
-		if len(attrs) == 3 {
+		if len(attrs) >= 3 {
 			val, err := strconv.ParseUint(attrs[0], 10, 32)
 			if err != nil {
 				uid = int(val)
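For context, the containers override xattr parsed above packs `uid:gid:mode` into one colon-separated string (the library's writer formats the mode field in octal, to the best of my knowledge). A hypothetical standalone parser for the three-field form — parseOverrideXattr is my name, not the library's, and extra fields are tolerated to mirror the new `len(attrs) >= 3` check:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseOverrideXattr parses a "uid:gid:mode" payload. Additional
// colon-separated fields after the third are ignored.
func parseOverrideXattr(v string) (uid, gid int, mode uint32, err error) {
	attrs := strings.Split(v, ":")
	if len(attrs) < 3 {
		return 0, 0, 0, fmt.Errorf("expected at least 3 fields in %q", v)
	}
	u, err := strconv.ParseUint(attrs[0], 10, 32)
	if err != nil {
		return 0, 0, 0, fmt.Errorf("uid: %w", err)
	}
	g, err := strconv.ParseUint(attrs[1], 10, 32)
	if err != nil {
		return 0, 0, 0, fmt.Errorf("gid: %w", err)
	}
	m, err := strconv.ParseUint(attrs[2], 8, 32) // mode assumed octal
	if err != nil {
		return 0, 0, 0, fmt.Errorf("mode: %w", err)
	}
	return int(u), int(g), uint32(m), nil
}

func main() {
	uid, gid, mode, err := parseOverrideXattr("0:0:755")
	fmt.Println(uid, gid, mode, err) // 0 0 493 <nil> (0o755 == 493)
}
```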
3  vendor/github.com/containers/storage/pkg/archive/changes.go  (generated, vendored)
@@ -270,6 +270,7 @@ type FileInfo struct {
 	capability []byte
 	added      bool
 	xattrs     map[string]string
+	target     string
 }
 
 // LookUp looks up the file information of a file.
@@ -336,6 +337,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
 		// back mtime
 		if statDifferent(oldStat, oldInfo, newStat, info) ||
 			!bytes.Equal(oldChild.capability, newChild.capability) ||
+			oldChild.target != newChild.target ||
 			!reflect.DeepEqual(oldChild.xattrs, newChild.xattrs) {
 			change := Change{
 				Path: newChild.path(),
@@ -390,6 +392,7 @@ func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo {
 		name:       string(os.PathSeparator),
 		idMappings: idMappings,
 		children:   make(map[string]*FileInfo),
+		target:     "",
 	}
 	return root
 }
11  vendor/github.com/containers/storage/pkg/archive/changes_linux.go  (generated, vendored)
@@ -79,6 +79,7 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
 		children:   make(map[string]*FileInfo),
 		parent:     parent,
 		idMappings: root.idMappings,
+		target:     "",
 	}
 	cpath := filepath.Join(dir, path)
 	stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
@@ -87,11 +88,11 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
 	}
 	info.stat = stat
 	info.capability, err = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
-	if err != nil && !errors.Is(err, system.EOPNOTSUPP) {
+	if err != nil && !errors.Is(err, system.ENOTSUP) {
 		return err
 	}
 	xattrs, err := system.Llistxattr(cpath)
-	if err != nil && !errors.Is(err, system.EOPNOTSUPP) {
+	if err != nil && !errors.Is(err, system.ENOTSUP) {
 		return err
 	}
 	for _, key := range xattrs {
@@ -110,6 +111,12 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
 			info.xattrs[key] = string(value)
 		}
 	}
+	if fi.Mode()&os.ModeSymlink != 0 {
+		info.target, err = os.Readlink(cpath)
+		if err != nil {
+			return err
+		}
+	}
 	parent.children[info.name] = info
 	return nil
 }
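The change above records each symlink's target during the filesystem walk, so a retargeted link now shows up as a change even when its stat data is identical. A tiny sketch of the comparison this enables (paths are placeholders for two layer checkouts):

```go
package main

import (
	"fmt"
	"os"
)

// symlinkChanged reports whether a symlink's target differs between two
// snapshots of the same path -- the property walkchunk now captures in
// FileInfo.target.
func symlinkChanged(oldPath, newPath string) (bool, error) {
	oldTarget, err := os.Readlink(oldPath)
	if err != nil {
		return false, err
	}
	newTarget, err := os.Readlink(newPath)
	if err != nil {
		return false, err
	}
	return oldTarget != newTarget, nil
}

func main() {
	changed, err := symlinkChanged("/tmp/old/link", "/tmp/new/link")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("retargeted:", changed)
}
```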
2  vendor/github.com/containers/storage/pkg/chrootarchive/archive.go  (generated, vendored)
@@ -47,7 +47,7 @@ func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error
 // This should be used to prevent a potential attacker from manipulating `dest`
 // such that it would provide access to files outside of `dest` through things
 // like symlinks. Normally `ResolveSymlinksInScope` would handle this, however
-// sanitizing symlinks in this manner is inherrently racey:
+// sanitizing symlinks in this manner is inherently racey:
 // ref: CVE-2018-15664
 func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
 	return untarHandler(tarArchive, dest, options, true, root)
12  vendor/github.com/containers/storage/pkg/chunked/cache_linux.go  (generated, vendored)
@@ -16,7 +16,7 @@ import (
 
 	storage "github.com/containers/storage"
 	graphdriver "github.com/containers/storage/drivers"
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/docker/go-units"
 	jsoniter "github.com/json-iterator/go"
@@ -710,7 +710,7 @@ func prepareCacheFile(manifest []byte, format graphdriver.DifferOutputFormat) ([
 	switch format {
 	case graphdriver.DifferOutputFormatDir:
 	case graphdriver.DifferOutputFormatFlat:
-		entries, err = makeEntriesFlat(entries)
+		entries, err = makeEntriesFlat(entries, nil)
 		if err != nil {
 			return nil, err
 		}
@@ -848,12 +848,12 @@ func (c *layersCache) findFileInOtherLayers(file *fileMetadata, useHardLinks boo
 	return "", "", nil
 }
 
-func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) {
+func (c *layersCache) findChunkInOtherLayers(chunk *minimal.FileMetadata) (string, string, int64, error) {
 	return c.findDigestInternal(chunk.ChunkDigest)
 }
 
-func unmarshalToc(manifest []byte) (*internal.TOC, error) {
-	var toc internal.TOC
+func unmarshalToc(manifest []byte) (*minimal.TOC, error) {
+	var toc minimal.TOC
 
 	iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
 
@@ -864,7 +864,7 @@ func unmarshalToc(manifest []byte) (*minimal.TOC, error) {
 
 	case "entries":
 		for iter.ReadArray() {
-			var m internal.FileMetadata
+			var m minimal.FileMetadata
 			for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
 				switch strings.ToLower(field) {
 				case "type":
18  vendor/github.com/containers/storage/pkg/chunked/compression.go  (generated, vendored)
@@ -4,18 +4,18 @@ import (
 	"io"
 
 	"github.com/containers/storage/pkg/chunked/compressor"
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
 )
 
 const (
-	TypeReg     = internal.TypeReg
-	TypeChunk   = internal.TypeChunk
-	TypeLink    = internal.TypeLink
-	TypeChar    = internal.TypeChar
-	TypeBlock   = internal.TypeBlock
-	TypeDir     = internal.TypeDir
-	TypeFifo    = internal.TypeFifo
-	TypeSymlink = internal.TypeSymlink
+	TypeReg     = minimal.TypeReg
+	TypeChunk   = minimal.TypeChunk
+	TypeLink    = minimal.TypeLink
+	TypeChar    = minimal.TypeChar
+	TypeBlock   = minimal.TypeBlock
+	TypeDir     = minimal.TypeDir
+	TypeFifo    = minimal.TypeFifo
+	TypeSymlink = minimal.TypeSymlink
 )
 
 // ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
189  vendor/github.com/containers/storage/pkg/chunked/compression_linux.go  (generated, vendored)
@@ -10,7 +10,7 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
 	"github.com/klauspost/compress/zstd"
 	"github.com/klauspost/pgzip"
 	digest "github.com/opencontainers/go-digest"
@@ -20,6 +20,12 @@ import (
 	expMaps "golang.org/x/exp/maps"
 )
 
+const (
+	// maxTocSize is the maximum size of a blob that we will attempt to process.
+	// It is used to prevent DoS attacks from layers that embed a very large TOC file.
+	maxTocSize = (1 << 20) * 150
+)
+
 var typesToTar = map[string]byte{
 	TypeReg:  tar.TypeReg,
 	TypeLink: tar.TypeLink,
@@ -38,33 +44,35 @@ func typeToTarType(t string) (byte, error) {
 	return r, nil
 }
 
+// readEstargzChunkedManifest reads the estargz manifest from the seekable stream blobStream.
+// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert.
 func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, tocDigest digest.Digest) ([]byte, int64, error) {
 	// information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md
 	footerSize := int64(51)
 	if blobSize <= footerSize {
 		return nil, 0, errors.New("blob too small")
 	}
-	chunk := ImageSourceChunk{
-		Offset: uint64(blobSize - footerSize),
-		Length: uint64(footerSize),
-	}
-	parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
-	if err != nil {
-		return nil, 0, err
-	}
-	var reader io.ReadCloser
-	select {
-	case r := <-parts:
-		reader = r
-	case err := <-errs:
-		return nil, 0, err
-	}
-	defer reader.Close()
 
 	footer := make([]byte, footerSize)
-	if _, err := io.ReadFull(reader, footer); err != nil {
+	streamsOrErrors, err := getBlobAt(blobStream, ImageSourceChunk{Offset: uint64(blobSize - footerSize), Length: uint64(footerSize)})
 	if err != nil {
+		var badRequestErr ErrBadRequest
+		if errors.As(err, &badRequestErr) {
+			err = errFallbackCanConvert{newErrFallbackToOrdinaryLayerDownload(err)}
+		}
 		return nil, 0, err
 	}
 
+	for soe := range streamsOrErrors {
+		if soe.stream != nil {
+			_, err = io.ReadFull(soe.stream, footer)
+			_ = soe.stream.Close()
+		}
+		if soe.err != nil && err == nil {
+			err = soe.err
+		}
+	}
+
 	/* Read the ToC offset:
 	   - 10 bytes gzip header
 	   - 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
@@ -81,48 +89,59 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
 
 	size := int64(blobSize - footerSize - tocOffset)
 	// set a reasonable limit
-	if size > (1<<20)*50 {
-		return nil, 0, errors.New("manifest too big")
+	if size > maxTocSize {
+		// Not errFallbackCanConvert: we would still use too much memory.
+		return nil, 0, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("estargz manifest too big to process in memory (%d bytes)", size))
 	}
 
-	chunk = ImageSourceChunk{
-		Offset: uint64(tocOffset),
-		Length: uint64(size),
-	}
-	parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk})
+	streamsOrErrors, err = getBlobAt(blobStream, ImageSourceChunk{Offset: uint64(tocOffset), Length: uint64(size)})
 	if err != nil {
+		var badRequestErr ErrBadRequest
+		if errors.As(err, &badRequestErr) {
+			err = errFallbackCanConvert{newErrFallbackToOrdinaryLayerDownload(err)}
+		}
 		return nil, 0, err
 	}
 
-	var tocReader io.ReadCloser
-	select {
-	case r := <-parts:
-		tocReader = r
-	case err := <-errs:
-		return nil, 0, err
-	}
-	defer tocReader.Close()
+	var manifestUncompressed []byte
 
-	r, err := pgzip.NewReader(tocReader)
-	if err != nil {
-		return nil, 0, err
-	}
-	defer r.Close()
+	for soe := range streamsOrErrors {
+		if soe.stream != nil {
+			err1 := func() error {
+				defer soe.stream.Close()
 
-	aTar := archivetar.NewReader(r)
+				r, err := pgzip.NewReader(soe.stream)
+				if err != nil {
+					return err
+				}
+				defer r.Close()
 
-	header, err := aTar.Next()
-	if err != nil {
-		return nil, 0, err
-	}
-	// set a reasonable limit
-	if header.Size > (1<<20)*50 {
-		return nil, 0, errors.New("manifest too big")
-	}
+				aTar := archivetar.NewReader(r)
 
-	manifestUncompressed := make([]byte, header.Size)
-	if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil {
-		return nil, 0, err
-	}
+				header, err := aTar.Next()
+				if err != nil {
+					return err
+				}
+				// set a reasonable limit
+				if header.Size > maxTocSize {
+					return errors.New("manifest too big")
+				}
+
+				manifestUncompressed = make([]byte, header.Size)
+				if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil {
+					return err
+				}
+				return nil
+			}()
+			if err == nil {
+				err = err1
+			}
+		} else if err == nil {
+			err = soe.err
+		}
+	}
+	if manifestUncompressed == nil {
+		return nil, 0, errors.New("manifest not found")
+	}
 
 	manifestDigester := digest.Canonical.Digester()
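The new getBlobAt helper (introduced elsewhere in this change) delivers each requested chunk as a stream-or-error value on a channel, and the loops above keep draining the channel even after a failure so no stream is leaked. A standalone sketch of that consumption discipline, with the channel element type approximated:

```go
package main

import (
	"fmt"
	"io"
)

// streamOrErr approximates the helper type used above: exactly one of the
// two fields is set per requested chunk.
type streamOrErr struct {
	stream io.ReadCloser
	err    error
}

// drainAll keeps ranging over the channel even after a failure, so every
// remaining stream is still closed -- the same discipline as the loops above
// (and as ensureAllBlobsDone in the real code).
func drainAll(ch <-chan streamOrErr) ([][]byte, error) {
	var blobs [][]byte
	var firstErr error
	for soe := range ch {
		if soe.err != nil {
			if firstErr == nil {
				firstErr = soe.err
			}
			continue
		}
		data, err := io.ReadAll(soe.stream)
		if closeErr := soe.stream.Close(); err == nil {
			err = closeErr
		}
		if err != nil {
			if firstErr == nil {
				firstErr = err
			}
			continue
		}
		blobs = append(blobs, data)
	}
	if firstErr != nil {
		return nil, fmt.Errorf("fetching chunks: %w", firstErr)
	}
	return blobs, nil
}

func main() {
	ch := make(chan streamOrErr)
	close(ch) // empty request set
	blobs, err := drainAll(ch)
	fmt.Println(len(blobs), err) // 0 <nil>
}
```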
@@ -140,10 +159,11 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
 
 // readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream.
 // Returns (manifest blob, parsed manifest, tar-split blob or nil, manifest offset).
-func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, *internal.TOC, []byte, int64, error) {
-	offsetMetadata := annotations[internal.ManifestInfoKey]
+// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert.
+func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) (_ []byte, _ *minimal.TOC, _ []byte, _ int64, retErr error) {
+	offsetMetadata := annotations[minimal.ManifestInfoKey]
 	if offsetMetadata == "" {
-		return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", internal.ManifestInfoKey)
+		return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", minimal.ManifestInfoKey)
 	}
 	var manifestChunk ImageSourceChunk
 	var manifestLengthUncompressed, manifestType uint64
@@ -153,48 +173,59 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
 	// The tarSplit… values are valid if tarSplitChunk.Offset > 0
 	var tarSplitChunk ImageSourceChunk
 	var tarSplitLengthUncompressed uint64
-	if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found {
+	if tarSplitInfoKeyAnnotation, found := annotations[minimal.TarSplitInfoKey]; found {
 		if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &tarSplitChunk.Offset, &tarSplitChunk.Length, &tarSplitLengthUncompressed); err != nil {
 			return nil, nil, nil, 0, err
 		}
 	}
 
-	if manifestType != internal.ManifestTypeCRFS {
+	if manifestType != minimal.ManifestTypeCRFS {
 		return nil, nil, nil, 0, errors.New("invalid manifest type")
 	}
 
 	// set a reasonable limit
-	if manifestChunk.Length > (1<<20)*50 {
-		return nil, nil, nil, 0, errors.New("manifest too big")
+	if manifestChunk.Length > maxTocSize {
+		// Not errFallbackCanConvert: we would still use too much memory.
+		return nil, nil, nil, 0, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("zstd:chunked manifest too big to process in memory (%d bytes compressed)", manifestChunk.Length))
 	}
-	if manifestLengthUncompressed > (1<<20)*50 {
-		return nil, nil, nil, 0, errors.New("manifest too big")
+	if manifestLengthUncompressed > maxTocSize {
+		// Not errFallbackCanConvert: we would still use too much memory.
+		return nil, nil, nil, 0, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("zstd:chunked manifest too big to process in memory (%d bytes uncompressed)", manifestLengthUncompressed))
 	}
 
 	chunks := []ImageSourceChunk{manifestChunk}
 	if tarSplitChunk.Offset > 0 {
 		chunks = append(chunks, tarSplitChunk)
 	}
-	parts, errs, err := blobStream.GetBlobAt(chunks)
+
+	streamsOrErrors, err := getBlobAt(blobStream, chunks...)
 	if err != nil {
+		var badRequestErr ErrBadRequest
+		if errors.As(err, &badRequestErr) {
+			err = errFallbackCanConvert{newErrFallbackToOrdinaryLayerDownload(err)}
+		}
 		return nil, nil, nil, 0, err
 	}
 
-	readBlob := func(len uint64) ([]byte, error) {
-		var reader io.ReadCloser
-		select {
-		case r := <-parts:
-			reader = r
-		case err := <-errs:
-			return nil, err
-		}
+	defer func() {
+		err := ensureAllBlobsDone(streamsOrErrors)
+		if retErr == nil {
+			retErr = err
+		}
+	}()
+
+	readBlob := func(len uint64) ([]byte, error) {
+		soe, ok := <-streamsOrErrors
+		if !ok {
+			return nil, errors.New("stream closed")
+		}
+		if soe.err != nil {
+			return nil, soe.err
+		}
+		defer soe.stream.Close()
 
 		blob := make([]byte, len)
-		if _, err := io.ReadFull(reader, blob); err != nil {
-			reader.Close()
-			return nil, err
-		}
-		if err := reader.Close(); err != nil {
+		if _, err := io.ReadFull(soe.stream, blob); err != nil {
 			return nil, err
 		}
 		return blob, nil
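readBlob above allocates exactly the announced length and insists the stream deliver all of it; io.ReadFull turns a short chunk into io.ErrUnexpectedEOF instead of silently returning a partial blob. A condensed standalone version of that idea (the size cap mirrors the maxTocSize pattern, the helper name is mine):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// readExactly buffers a chunk of a known length, bounding the allocation
// first and requiring the full length to arrive.
func readExactly(r io.ReadCloser, length uint64) ([]byte, error) {
	defer r.Close()
	const maxBlob = 150 << 20 // cap before allocating, like maxTocSize above
	if length > maxBlob {
		return nil, fmt.Errorf("refusing to buffer %d bytes", length)
	}
	blob := make([]byte, length)
	if _, err := io.ReadFull(r, blob); err != nil {
		return nil, err // io.ErrUnexpectedEOF on a short read
	}
	return blob, nil
}

func main() {
	r := io.NopCloser(strings.NewReader("hello"))
	blob, err := readExactly(r, 5)
	fmt.Println(string(blob), err) // hello <nil>
}
```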
@@ -217,7 +248,7 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
 	var decodedTarSplit []byte = nil
 	if toc.TarSplitDigest != "" {
 		if tarSplitChunk.Offset <= 0 {
-			return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", internal.TarSplitInfoKey)
+			return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", minimal.TarSplitInfoKey)
 		}
 		tarSplit, err := readBlob(tarSplitChunk.Length)
 		if err != nil {
@@ -247,11 +278,11 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
 }
 
 // ensureTOCMatchesTarSplit validates that toc and tarSplit contain _exactly_ the same entries.
-func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error {
-	pendingFiles := map[string]*internal.FileMetadata{} // Name -> an entry in toc.Entries
+func ensureTOCMatchesTarSplit(toc *minimal.TOC, tarSplit []byte) error {
+	pendingFiles := map[string]*minimal.FileMetadata{} // Name -> an entry in toc.Entries
 	for i := range toc.Entries {
 		e := &toc.Entries[i]
-		if e.Type != internal.TypeChunk {
+		if e.Type != minimal.TypeChunk {
 			if _, ok := pendingFiles[e.Name]; ok {
 				return fmt.Errorf("TOC contains duplicate entries for path %q", e.Name)
 			}
@@ -266,7 +297,7 @@ func ensureTOCMatchesTarSplit(toc *minimal.TOC, tarSplit []byte) error {
 			return fmt.Errorf("tar-split contains an entry for %q missing in TOC", hdr.Name)
 		}
 		delete(pendingFiles, hdr.Name)
-		expected, err := internal.NewFileMetadata(hdr)
+		expected, err := minimal.NewFileMetadata(hdr)
 		if err != nil {
 			return fmt.Errorf("determining expected metadata for %q: %w", hdr.Name, err)
 		}
@@ -347,8 +378,8 @@ func ensureTimePointersMatch(a, b *time.Time) error {
 
 // ensureFileMetadataAttributesMatch ensures that a and b match in file attributes (it ignores entries relevant to locating data
 // in the tar stream or matching contents)
-func ensureFileMetadataAttributesMatch(a, b *internal.FileMetadata) error {
-	// Keep this in sync with internal.FileMetadata!
+func ensureFileMetadataAttributesMatch(a, b *minimal.FileMetadata) error {
+	// Keep this in sync with minimal.FileMetadata!
 
 	if a.Type != b.Type {
 		return fmt.Errorf("mismatch of Type: %q != %q", a.Type, b.Type)
24  vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go  (generated, vendored)
@@ -9,7 +9,7 @@ import (
 	"bytes"
 	"io"
 
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/klauspost/compress/zstd"
 	"github.com/opencontainers/go-digest"
@@ -213,7 +213,7 @@ func newTarSplitData(level int) (*tarSplitData, error) {
 	compressed := bytes.NewBuffer(nil)
 	digester := digest.Canonical.Digester()
 
-	zstdWriter, err := internal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level)
+	zstdWriter, err := minimal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level)
 	if err != nil {
 		return nil, err
 	}
@@ -254,7 +254,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 
 	buf := make([]byte, 4096)
 
-	zstdWriter, err := internal.ZstdWriterWithLevel(dest, level)
+	zstdWriter, err := minimal.ZstdWriterWithLevel(dest, level)
 	if err != nil {
 		return err
 	}
@@ -276,7 +276,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 		return offset, nil
 	}
 
-	var metadata []internal.FileMetadata
+	var metadata []minimal.FileMetadata
 	for {
 		hdr, err := tr.Next()
 		if err != nil {
@@ -341,9 +341,9 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 
 		chunkSize := rcReader.WrittenOut - lastChunkOffset
 		if chunkSize > 0 {
-			chunkType := internal.ChunkTypeData
+			chunkType := minimal.ChunkTypeData
 			if rcReader.IsLastChunkZeros {
-				chunkType = internal.ChunkTypeZeros
+				chunkType = minimal.ChunkTypeZeros
 			}
 
 			chunks = append(chunks, chunk{
@@ -368,17 +368,17 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 		}
 	}
 
-	mainEntry, err := internal.NewFileMetadata(hdr)
+	mainEntry, err := minimal.NewFileMetadata(hdr)
 	if err != nil {
 		return err
 	}
 	mainEntry.Digest = checksum
 	mainEntry.Offset = startOffset
 	mainEntry.EndOffset = lastOffset
-	entries := []internal.FileMetadata{mainEntry}
+	entries := []minimal.FileMetadata{mainEntry}
 	for i := 1; i < len(chunks); i++ {
-		entries = append(entries, internal.FileMetadata{
-			Type:        internal.TypeChunk,
+		entries = append(entries, minimal.FileMetadata{
+			Type:        minimal.TypeChunk,
 			Name:        hdr.Name,
 			ChunkOffset: chunks[i].ChunkOffset,
 		})
@@ -424,13 +424,13 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 	}
 	tarSplitData.zstd = nil
 
-	ts := internal.TarSplitData{
+	ts := minimal.TarSplitData{
 		Data:             tarSplitData.compressed.Bytes(),
 		Digest:           tarSplitData.digester.Digest(),
 		UncompressedSize: tarSplitData.uncompressedCounter.Count,
 	}
 
-	return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level)
+	return minimal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level)
 }
 
 type zstdChunkedWriter struct {
66  vendor/github.com/containers/storage/pkg/chunked/dump/dump.go  (generated, vendored)
@@ -9,10 +9,11 @@ import (
 	"io"
 	"path/filepath"
 	"reflect"
 	"strings"
 	"time"
 
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
+	storagePath "github.com/containers/storage/pkg/chunked/internal/path"
 	"github.com/opencontainers/go-digest"
 	"golang.org/x/sys/unix"
 )
 
@@ -85,17 +86,17 @@ func escapedOptional(val []byte, escape int) string {
 
 func getStMode(mode uint32, typ string) (uint32, error) {
 	switch typ {
-	case internal.TypeReg, internal.TypeLink:
+	case minimal.TypeReg, minimal.TypeLink:
 		mode |= unix.S_IFREG
-	case internal.TypeChar:
+	case minimal.TypeChar:
 		mode |= unix.S_IFCHR
-	case internal.TypeBlock:
+	case minimal.TypeBlock:
 		mode |= unix.S_IFBLK
-	case internal.TypeDir:
+	case minimal.TypeDir:
 		mode |= unix.S_IFDIR
-	case internal.TypeFifo:
+	case minimal.TypeFifo:
 		mode |= unix.S_IFIFO
-	case internal.TypeSymlink:
+	case minimal.TypeSymlink:
 		mode |= unix.S_IFLNK
 	default:
 		return 0, fmt.Errorf("unknown type %s", typ)
@@ -103,24 +104,14 @@ func getStMode(mode uint32, typ string) (uint32, error) {
 	return mode, nil
 }
 
-func sanitizeName(name string) string {
-	path := filepath.Clean(name)
-	if path == "." {
-		path = "/"
-	} else if path[0] != '/' {
-		path = "/" + path
-	}
-	return path
-}
-
-func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error {
-	path := sanitizeName(entry.Name)
+func dumpNode(out io.Writer, added map[string]*minimal.FileMetadata, links map[string]int, verityDigests map[string]string, entry *minimal.FileMetadata) error {
+	path := storagePath.CleanAbsPath(entry.Name)
 
 	parent := filepath.Dir(path)
 	if _, found := added[parent]; !found && path != "/" {
-		parentEntry := &internal.FileMetadata{
+		parentEntry := &minimal.FileMetadata{
 			Name: parent,
-			Type: internal.TypeDir,
+			Type: minimal.TypeDir,
 			Mode: 0o755,
 		}
 		if err := dumpNode(out, added, links, verityDigests, parentEntry); err != nil {
@@ -143,7 +134,7 @@ func dumpNode(out io.Writer, added map[string]*minimal.FileMetadata, links map[
 
 	nlinks := links[entry.Name] + links[entry.Linkname] + 1
 	link := ""
-	if entry.Type == internal.TypeLink {
+	if entry.Type == minimal.TypeLink {
 		link = "@"
 	}
 
@@ -169,16 +160,21 @@ func dumpNode(out io.Writer, added map[string]*minimal.FileMetadata, links map[
 
 	var payload string
 	if entry.Linkname != "" {
-		if entry.Type == internal.TypeSymlink {
+		if entry.Type == minimal.TypeSymlink {
 			payload = entry.Linkname
 		} else {
-			payload = sanitizeName(entry.Linkname)
+			payload = storagePath.CleanAbsPath(entry.Linkname)
 		}
-	} else {
-		if len(entry.Digest) > 10 {
-			d := strings.Replace(entry.Digest, "sha256:", "", 1)
-			payload = d[:2] + "/" + d[2:]
-		}
+	} else if entry.Digest != "" {
+		d, err := digest.Parse(entry.Digest)
+		if err != nil {
+			return fmt.Errorf("invalid digest %q for %q: %w", entry.Digest, entry.Name, err)
+		}
+		path, err := storagePath.RegularFilePathForValidatedDigest(d)
+		if err != nil {
+			return fmt.Errorf("determining physical file path for %q: %w", entry.Name, err)
+		}
+		payload = path
 	}
 
 	if _, err := fmt.Fprint(out, escapedOptional([]byte(payload), ESCAPE_LONE_DASH)); err != nil {
@@ -219,7 +215,7 @@ func dumpNode(out io.Writer, added map[string]*minimal.FileMetadata, links map[
 
 // GenerateDump generates a dump of the TOC in the same format as `composefs-info dump`
 func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, error) {
-	toc, ok := tocI.(*internal.TOC)
+	toc, ok := tocI.(*minimal.TOC)
 	if !ok {
 		return nil, fmt.Errorf("invalid TOC type")
 	}
@@ -235,21 +231,21 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader,
 	}()
 
 	links := make(map[string]int)
-	added := make(map[string]*internal.FileMetadata)
+	added := make(map[string]*minimal.FileMetadata)
 	for _, e := range toc.Entries {
 		if e.Linkname == "" {
 			continue
 		}
-		if e.Type == internal.TypeSymlink {
+		if e.Type == minimal.TypeSymlink {
 			continue
 		}
 		links[e.Linkname] = links[e.Linkname] + 1
 	}
 
 	if len(toc.Entries) == 0 {
-		root := &internal.FileMetadata{
+		root := &minimal.FileMetadata{
 			Name: "/",
-			Type: internal.TypeDir,
+			Type: minimal.TypeDir,
 			Mode: 0o755,
 		}
 
@@ -261,7 +257,7 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader,
 	}
 
 	for _, e := range toc.Entries {
-		if e.Type == internal.TypeChunk {
+		if e.Type == minimal.TypeChunk {
 			continue
 		}
 		if err := dumpNode(w, added, links, verityDigests, &e); err != nil {
73  vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go  (generated, vendored)
@@ -15,7 +15,8 @@ import (
 
 	driversCopy "github.com/containers/storage/drivers/copy"
 	"github.com/containers/storage/pkg/archive"
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
+	storagePath "github.com/containers/storage/pkg/chunked/internal/path"
 	securejoin "github.com/cyphar/filepath-securejoin"
 	"github.com/vbatts/tar-split/archive/tar"
 	"golang.org/x/sys/unix"
@@ -34,14 +35,14 @@ func procPathForFd(fd int) string {
 	return fmt.Sprintf("/proc/self/fd/%d", fd)
 }
 
-// fileMetadata is a wrapper around internal.FileMetadata with additional private fields that
+// fileMetadata is a wrapper around minimal.FileMetadata with additional private fields that
 // are not part of the TOC document.
 // Type: TypeChunk entries are stored in Chunks, the primary [fileMetadata] entries never use TypeChunk.
 type fileMetadata struct {
-	internal.FileMetadata
+	minimal.FileMetadata
 
 	// chunks stores the TypeChunk entries relevant to this entry when FileMetadata.Type == TypeReg.
-	chunks []*internal.FileMetadata
+	chunks []*minimal.FileMetadata
 
 	// skipSetAttrs is set when the file attributes must not be
 	// modified, e.g. it is a hard link from a different source,
@@ -49,10 +50,37 @@ type fileMetadata struct {
 	skipSetAttrs bool
 }
 
+// splitPath takes a file path as input and returns two components: dir and base.
+// Unlike filepath.Split(), this function handles some edge cases.
+// If the path refers to a file in the root directory, the returned dir is "/".
+// The returned base value is never empty, and it never contains a slash or the
+// value "..".
+func splitPath(path string) (string, string, error) {
+	path = storagePath.CleanAbsPath(path)
+	dir, base := filepath.Split(path)
+	if base == "" {
+		base = "."
+	}
+	// Remove trailing slashes from dir, but make sure that "/" is preserved.
+	dir = strings.TrimSuffix(dir, "/")
+	if dir == "" {
+		dir = "/"
+	}
+
+	if strings.Contains(base, "/") {
+		// This should never happen, but be safe as the base is passed to *at syscalls.
+		return "", "", fmt.Errorf("internal error: splitPath(%q) contains a slash", path)
+	}
+	return dir, base, nil
+}
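To see the edge cases concretely, here is a self-contained copy of the splitPath logic above with a tiny driver; CleanAbsPath is inlined as filepath.Clean("/" + path), matching the new path.go later in this diff (the error branch is omitted since this sketch cannot produce a base containing a slash):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// splitPath mirrors the helper above: clean to an absolute path first, then
// split, normalizing so dir is "/" for entries directly under the root.
func splitPath(path string) (dir, base string) {
	path = filepath.Clean("/" + path) // CleanAbsPath: removes "..", ".", forces a leading "/"
	dir, base = filepath.Split(path)
	if base == "" {
		base = "." // only happens for the root itself
	}
	dir = strings.TrimSuffix(dir, "/")
	if dir == "" {
		dir = "/"
	}
	return dir, base
}

func main() {
	for _, p := range []string{"/usr/bin/skopeo", "skopeo", "/", "/a/../b", "etc/"} {
		dir, base := splitPath(p)
		fmt.Printf("%-18q -> dir=%q base=%q\n", p, dir, base)
	}
}
```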
 func doHardLink(dirfd, srcFd int, destFile string) error {
-	destDir, destBase := filepath.Split(destFile)
+	destDir, destBase, err := splitPath(destFile)
+	if err != nil {
+		return err
+	}
 	destDirFd := dirfd
-	if destDir != "" && destDir != "." {
+	if destDir != "/" {
 		f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0)
 		if err != nil {
 			return err
@@ -72,7 +100,7 @@ func doHardLink(dirfd, srcFd int, destFile string) error {
 		return nil
 	}
 
-	err := doLink()
+	err = doLink()
 
 	// if the destination exists, unlink it first and try again
 	if err != nil && os.IsExist(err) {
@@ -281,8 +309,11 @@ func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.Fil
 	// If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the
 	// last component as the path to openat().
 	if hasNoFollow {
-		dirName, baseName := filepath.Split(name)
-		if dirName != "" && dirName != "." {
+		dirName, baseName, err := splitPath(name)
+		if err != nil {
+			return -1, err
+		}
+		if dirName != "/" {
 			newRoot, err := securejoin.SecureJoin(root, dirName)
 			if err != nil {
 				return -1, err
@@ -409,7 +440,8 @@ func openOrCreateDirUnderRoot(dirfd int, name string, mode os.FileMode) (*os.Fil
 
 	if errors.Is(err, unix.ENOENT) {
 		parent := filepath.Dir(name)
-		if parent != "" {
+		// do not create the root directory, it should always exist
+		if parent != name {
 			pDir, err2 := openOrCreateDirUnderRoot(dirfd, parent, mode)
 			if err2 != nil {
 				return nil, err
@@ -448,9 +480,12 @@ func appendHole(fd int, name string, size int64) error {
 }
 
 func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata, options *archive.TarOptions) error {
-	parent, base := filepath.Split(name)
+	parent, base, err := splitPath(name)
+	if err != nil {
+		return err
+	}
 	parentFd := dirfd
-	if parent != "" && parent != "." {
+	if parent != "/" {
 		parentFile, err := openOrCreateDirUnderRoot(dirfd, parent, 0)
 		if err != nil {
 			return err
@@ -506,9 +541,12 @@ func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *arch
 }
 
 func safeSymlink(dirfd int, metadata *fileMetadata) error {
-	destDir, destBase := filepath.Split(metadata.Name)
+	destDir, destBase, err := splitPath(metadata.Name)
+	if err != nil {
+		return err
+	}
 	destDirFd := dirfd
-	if destDir != "" && destDir != "." {
+	if destDir != "/" {
 		f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0)
 		if err != nil {
 			return err
@@ -542,9 +580,12 @@ func (d whiteoutHandler) Setxattr(path, name string, value []byte) error {
 }
 
 func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error {
-	dir, base := filepath.Split(path)
+	dir, base, err := splitPath(path)
+	if err != nil {
+		return err
+	}
 	dirfd := d.Dirfd
-	if dir != "" && dir != "." {
+	if dir != "/" {
 		dir, err := openOrCreateDirUnderRoot(d.Dirfd, dir, 0)
 		if err != nil {
 			return err
@@ -1,4 +1,4 @@
-package internal
+package minimal
 
 // NOTE: This is used from github.com/containers/image by callers that
 // don't otherwise use containers/storage, so don't make this depend on any
27  vendor/github.com/containers/storage/pkg/chunked/internal/path/path.go  (generated, vendored, new file)
@@ -0,0 +1,27 @@
+package path
+
+import (
+	"fmt"
+	"path/filepath"
+
+	"github.com/opencontainers/go-digest"
+)
+
+// CleanAbsPath removes any ".." and "." from the path
+// and ensures it starts with a "/". If the path refers to the root
+// directory, it returns "/".
+func CleanAbsPath(path string) string {
+	return filepath.Clean("/" + path)
+}
+
+// RegularFilePathForValidatedDigest returns the path used in the composefs backing store for a
+// regular file with the provided content digest.
+//
+// The caller MUST ensure d is a valid digest (in particular, that it contains no path separators or .. entries)
+func RegularFilePathForValidatedDigest(d digest.Digest) (string, error) {
+	if algo := d.Algorithm(); algo != digest.SHA256 {
+		return "", fmt.Errorf("unexpected digest algorithm %q", algo)
+	}
+	e := d.Encoded()
+	return e[0:2] + "/" + e[2:], nil
+}
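A quick demonstration of the two helpers in the new file, re-declared locally so the snippet is self-contained (the digest value is generated from an arbitrary string, not taken from any real image):

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/opencontainers/go-digest"
)

func cleanAbsPath(p string) string { return filepath.Clean("/" + p) }

func regularFilePathForValidatedDigest(d digest.Digest) (string, error) {
	if algo := d.Algorithm(); algo != digest.SHA256 {
		return "", fmt.Errorf("unexpected digest algorithm %q", algo)
	}
	e := d.Encoded()
	return e[0:2] + "/" + e[2:], nil
}

func main() {
	// ".." cannot escape: the path is cleaned after forcing a leading "/".
	fmt.Println(cleanAbsPath("a/../etc/passwd")) // /etc/passwd

	d := digest.FromString("example content") // deterministic sha256 digest
	p, err := regularFilePathForValidatedDigest(d)
	if err != nil {
		panic(err)
	}
	// The first two hex characters become the fan-out directory.
	fmt.Println(p)
}
```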
614  vendor/github.com/containers/storage/pkg/chunked/storage_linux.go  (generated, vendored)
@@ -2,6 +2,7 @@ package chunked
 
 import (
 	archivetar "archive/tar"
+	"bytes"
 	"context"
 	"encoding/base64"
 	"errors"
@@ -22,17 +23,21 @@ import (
 	graphdriver "github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/chunked/compressor"
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
+	path "github.com/containers/storage/pkg/chunked/internal/path"
 	"github.com/containers/storage/pkg/chunked/toc"
 	"github.com/containers/storage/pkg/fsverity"
 	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/system"
 	securejoin "github.com/cyphar/filepath-securejoin"
 	jsoniter "github.com/json-iterator/go"
 	"github.com/klauspost/compress/zstd"
 	"github.com/klauspost/pgzip"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/sirupsen/logrus"
 	"github.com/vbatts/tar-split/archive/tar"
+	"github.com/vbatts/tar-split/tar/asm"
+	tsStorage "github.com/vbatts/tar-split/tar/storage"
 	"golang.org/x/sys/unix"
 )
 
@@ -57,46 +62,53 @@ const (
 type compressedFileType int
 
 type chunkedDiffer struct {
+	// Initial parameters, used throughout and never modified
+	// ==========
+	pullOptions pullOptions
 	stream      ImageSourceSeekable
-	manifest    []byte
-	toc         *internal.TOC // The parsed contents of manifest, or nil if not yet available
-	tarSplit    []byte
-	layersCache *layersCache
-	tocOffset   int64
-	fileType    compressedFileType
-
-	copyBuffer []byte
-
-	gzipReader *pgzip.Reader
-	zstdReader *zstd.Decoder
-	rawReader  io.Reader
-
-	// tocDigest is the digest of the TOC document when the layer
-	// is partially pulled.
-	tocDigest digest.Digest
 	// blobDigest is the digest of the whole compressed layer. It is used if
 	// convertToZstdChunked to validate a layer when it is converted since there
 	// is no TOC referenced by the manifest.
 	blobDigest digest.Digest
 	blobSize   int64
 
+	// Input format
+	// ==========
+	fileType compressedFileType
 	// convertedToZstdChunked is set to true if the layer needs to
 	// be converted to the zstd:chunked format before it can be
 	// handled.
 	convertToZstdChunked bool
 
+	// Chunked metadata
+	// This is usually set in GetDiffer, but if convertToZstdChunked, it is only computed in chunkedDiffer.ApplyDiff
+	// ==========
+	// tocDigest is the digest of the TOC document when the layer
+	// is partially pulled, or "" if not relevant to consumers.
+	tocDigest           digest.Digest
+	tocOffset           int64
+	manifest            []byte
+	toc                 *minimal.TOC // The parsed contents of manifest, or nil if not yet available
+	tarSplit            []byte
+	uncompressedTarSize int64 // -1 if unknown
 	// skipValidation is set to true if the individual files in
 	// the layer are trusted and should not be validated.
 	skipValidation bool
 
-	blobSize            int64
-	uncompressedTarSize int64 // -1 if unknown
-
-	pullOptions map[string]string
-
-	useFsVerity graphdriver.DifferFsVerity
+	// Long-term caches
+	// This is set in GetDiffer, when the caller must not hold any storage locks, and later consumed in .ApplyDiff()
+	// ==========
+	layersCache     *layersCache
+	copyBuffer      []byte
+	fsVerityMutex   sync.Mutex // protects fsVerityDigests
 	fsVerityDigests map[string]string
-	fsVerityMutex   sync.Mutex
+
+	// Private state of .ApplyDiff
+	// ==========
+	gzipReader  *pgzip.Reader
+	zstdReader  *zstd.Decoder
+	rawReader   io.Reader
+	useFsVerity graphdriver.DifferFsVerity
 }
 
 var xattrsToIgnore = map[string]interface{}{
@@ -108,6 +120,42 @@ type chunkedLayerData struct {
 	Format graphdriver.DifferOutputFormat `json:"format"`
 }
 
+// pullOptions contains parsed data from storage.Store.PullOptions.
+// TO DO: ideally this should be parsed along with the rest of the config file into StoreOptions directly
+// (and then storage.Store.PullOptions would need to be somehow simulated).
+type pullOptions struct {
+	enablePartialImages                     bool     // enable_partial_images
+	convertImages                           bool     // convert_images
+	useHardLinks                            bool     // use_hard_links
+	insecureAllowUnpredictableImageContents bool     // insecure_allow_unpredictable_image_contents
+	ostreeRepos                             []string // ostree_repos
+}
+
+func parsePullOptions(store storage.Store) pullOptions {
+	options := store.PullOptions()
+
+	res := pullOptions{}
+	for _, e := range []struct {
+		dest         *bool
+		name         string
+		defaultValue bool
+	}{
+		{&res.enablePartialImages, "enable_partial_images", false},
+		{&res.convertImages, "convert_images", false},
+		{&res.useHardLinks, "use_hard_links", false},
+		{&res.insecureAllowUnpredictableImageContents, "insecure_allow_unpredictable_image_contents", false},
+	} {
+		if value, ok := options[e.name]; ok {
+			*e.dest = strings.ToLower(value) == "true"
+		} else {
+			*e.dest = e.defaultValue
+		}
+	}
+	res.ostreeRepos = strings.Split(options["ostree_repos"], ":")
+
+	return res
+}
+
 func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *seekableFile, digest.Digest, map[string]string, error) {
 	diff, err := archive.DecompressStream(payload)
 	if err != nil {
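parsePullOptions above normalizes a string-keyed option map into a typed struct via a table of destinations, so adding an option is a one-line change. A trimmed standalone version of the same technique over a plain map (no storage.Store needed):

```go
package main

import (
	"fmt"
	"strings"
)

type pullOptions struct {
	enablePartialImages bool
	convertImages       bool
	ostreeRepos         []string
}

// parse mirrors parsePullOptions: each boolean option is described once in a
// table pairing the destination field with its config key and default.
func parse(options map[string]string) pullOptions {
	res := pullOptions{}
	for _, e := range []struct {
		dest         *bool
		name         string
		defaultValue bool
	}{
		{&res.enablePartialImages, "enable_partial_images", false},
		{&res.convertImages, "convert_images", false},
	} {
		if value, ok := options[e.name]; ok {
			*e.dest = strings.ToLower(value) == "true"
		} else {
			*e.dest = e.defaultValue
		}
	}
	res.ostreeRepos = strings.Split(options["ostree_repos"], ":")
	return res
}

func main() {
	got := parse(map[string]string{"enable_partial_images": "TRUE", "ostree_repos": "a:b"})
	fmt.Printf("%+v\n", got) // {enablePartialImages:true convertImages:false ostreeRepos:[a b]}
}
```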
@@ -144,127 +192,160 @@ func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *o
 }
 
 // GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
-// If it returns an error that implements IsErrFallbackToOrdinaryLayerDownload, the caller can
+// If it returns an error that matches ErrFallbackToOrdinaryLayerDownload, the caller can
 // retry the operation with a different method.
 func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
-	pullOptions := store.PullOptions()
+	pullOptions := parsePullOptions(store)
 
-	if !parseBooleanPullOption(pullOptions, "enable_partial_images", false) {
-		// If convertImages is set, the two options disagree whether fallback is permissible.
+	if !pullOptions.enablePartialImages {
+		// If pullOptions.convertImages is set, the two options disagree whether fallback is permissible.
 		// Right now, we enable it, but that’s not a promise; rather, such a configuration should ideally be rejected.
 		return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("partial images are disabled"))
 	}
-	// convertImages also serves as a “must not fallback to non-partial pull” option (?!)
-	convertImages := parseBooleanPullOption(pullOptions, "convert_images", false)
+	// pullOptions.convertImages also serves as a “must not fallback to non-partial pull” option (?!)
 
 	graphDriver, err := store.GraphDriver()
 	if err != nil {
 		return nil, err
 	}
 	if _, partialSupported := graphDriver.(graphdriver.DriverWithDiffer); !partialSupported {
-		if convertImages {
+		if pullOptions.convertImages {
 			return nil, fmt.Errorf("graph driver %s does not support partial pull but convert_images requires that", graphDriver.String())
 		}
 		return nil, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("graph driver %s does not support partial pull", graphDriver.String()))
 	}
 
-	differ, canFallback, err := getProperDiffer(store, blobDigest, blobSize, annotations, iss, pullOptions)
+	differ, err := getProperDiffer(store, blobDigest, blobSize, annotations, iss, pullOptions)
 	if err != nil {
-		if !canFallback {
+		var fallbackErr ErrFallbackToOrdinaryLayerDownload
+		if !errors.As(err, &fallbackErr) {
 			return nil, err
 		}
 		// If convert_images is enabled, always attempt to convert it instead of returning an error or falling back to a different method.
-		if convertImages {
-			logrus.Debugf("Created differ to convert blob %q", blobDigest)
-			return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions)
+		if !pullOptions.convertImages {
+			return nil, err
 		}
-		return nil, newErrFallbackToOrdinaryLayerDownload(err)
+		var canConvertErr errFallbackCanConvert
+		if !errors.As(err, &canConvertErr) {
+			// We are supposed to use makeConvertFromRawDiffer, but that would not work.
+			// Fail, and make sure the error does _not_ match ErrFallbackToOrdinaryLayerDownload: use only the error text,
+			// discard all type information.
+			return nil, fmt.Errorf("neither a partial pull nor convert_images is possible: %s", err.Error())
+		}
+		logrus.Debugf("Created differ to convert blob %q", blobDigest)
+		return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions)
 	}
 
 	return differ, nil
 }
 
+// errFallbackCanConvert is an error type _accompanying_ ErrFallbackToOrdinaryLayerDownload
+// within getProperDiffer, to mark that using makeConvertFromRawDiffer makes sense.
+// This is used to distinguish between cases where the environment does not support partial pulls
+// (e.g. a registry does not support range requests) and convert_images is still possible,
+// from cases where the image content is unacceptable for partial pulls (e.g. exceeds memory limits)
+// and convert_images would not help.
+type errFallbackCanConvert struct {
+	err error
+}
+
+func (e errFallbackCanConvert) Error() string {
+	return e.err.Error()
+}
+
+func (e errFallbackCanConvert) Unwrap() error {
+	return e.err
+}
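The two-layer error design above (a public fallback marker optionally wrapped by the private can-convert marker) is driven entirely by errors.As and Unwrap. A compact sketch of how such nested sentinel wrapper types match; type names echo the ones above but the exported marker is stood in by a lowercase simplification here:

```go
package main

import (
	"errors"
	"fmt"
)

type errFallbackToOrdinaryLayerDownload struct{ err error }

func (e errFallbackToOrdinaryLayerDownload) Error() string { return e.err.Error() }
func (e errFallbackToOrdinaryLayerDownload) Unwrap() error { return e.err }

type errFallbackCanConvert struct{ err error }

func (e errFallbackCanConvert) Error() string { return e.err.Error() }
func (e errFallbackCanConvert) Unwrap() error { return e.err }

func main() {
	// "can convert" wraps "fallback": both markers match via errors.As.
	err := errFallbackCanConvert{errFallbackToOrdinaryLayerDownload{errors.New("no TOC found")}}

	var fallback errFallbackToOrdinaryLayerDownload
	var canConvert errFallbackCanConvert
	fmt.Println(errors.As(err, &canConvert)) // true: matches the outer marker
	fmt.Println(errors.As(err, &fallback))   // true: found by unwrapping

	// A plain fallback error matches only the fallback marker.
	plain := errFallbackToOrdinaryLayerDownload{errors.New("driver unsupported")}
	fmt.Println(errors.As(plain, &canConvert)) // false: conversion won't help
}
```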
|
||||
// getProperDiffer is an implementation detail of GetDiffer.
|
||||
// It returns a “proper” differ (not a convert_images one) if possible.
|
||||
// On error, the second parameter is true if a fallback to an alternative (either the makeConverToRaw differ, or a non-partial pull)
|
||||
// is permissible.
|
||||
func getProperDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (graphdriver.Differ, bool, error) {
|
||||
zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey]
|
||||
// May return an error matching ErrFallbackToOrdinaryLayerDownload if a fallback to an alternative
|
||||
// (either makeConvertFromRawDiffer, or a non-partial pull) is permissible.
|
||||
func getProperDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, pullOptions pullOptions) (graphdriver.Differ, error) {
|
||||
zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[minimal.ManifestChecksumKey]
|
||||
estargzTOCDigestString, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation]
|
||||
|
||||
switch {
|
||||
case hasZstdChunkedTOC && hasEstargzTOC:
|
||||
return nil, false, errors.New("both zstd:chunked and eStargz TOC found")
|
||||
return nil, errors.New("both zstd:chunked and eStargz TOC found")
|
||||
|
||||
case hasZstdChunkedTOC:
|
||||
zstdChunkedTOCDigest, err := digest.Parse(zstdChunkedTOCDigestString)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
differ, err := makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions)
|
||||
if err != nil {
|
||||
logrus.Debugf("Could not create zstd:chunked differ for blob %q: %v", blobDigest, err)
|
||||
// If the error is a bad request to the server, then signal to the caller that it can try a different method.
|
||||
var badRequestErr ErrBadRequest
|
||||
return nil, errors.As(err, &badRequestErr), err
|
||||
return nil, err
|
||||
}
|
||||
logrus.Debugf("Created zstd:chunked differ for blob %q", blobDigest)
|
||||
return differ, false, nil
|
||||
return differ, nil
|
||||
|
||||
case hasEstargzTOC:
|
||||
estargzTOCDigest, err := digest.Parse(estargzTOCDigestString)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
differ, err := makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions)
|
||||
if err != nil {
|
||||
logrus.Debugf("Could not create estargz differ for blob %q: %v", blobDigest, err)
|
||||
// If the error is a bad request to the server, then signal to the caller that it can try a different method.
|
||||
var badRequestErr ErrBadRequest
|
||||
return nil, errors.As(err, &badRequestErr), err
|
||||
return nil, err
|
||||
}
|
||||
logrus.Debugf("Created eStargz differ for blob %q", blobDigest)
|
||||
return differ, false, nil
|
||||
return differ, nil
|
||||
|
||||
default: // no TOC
|
||||
convertImages := parseBooleanPullOption(pullOptions, "convert_images", false)
|
||||
if !convertImages {
|
||||
return nil, true, errors.New("no TOC found and convert_images is not configured")
|
||||
message := "no TOC found"
|
||||
if !pullOptions.convertImages {
|
||||
message = "no TOC found and convert_images is not configured"
|
||||
}
|
||||
return nil, errFallbackCanConvert{
|
||||
newErrFallbackToOrdinaryLayerDownload(errors.New(message)),
|
||||
}
|
||||
return nil, true, errors.New("no TOC found")
|
||||
}
|
||||
}
|
||||
|
||||
func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
|
||||
func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions pullOptions) (*chunkedDiffer, error) {
|
||||
layersCache, err := getLayersCache(store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &chunkedDiffer{
|
||||
fsVerityDigests: make(map[string]string),
|
||||
blobDigest: blobDigest,
|
||||
blobSize: blobSize,
|
||||
uncompressedTarSize: -1, // Will be computed later
|
||||
pullOptions: pullOptions,
|
||||
stream: iss,
|
||||
blobDigest: blobDigest,
|
||||
blobSize: blobSize,
|
||||
|
||||
convertToZstdChunked: true,
|
||||
copyBuffer: makeCopyBuffer(),
|
||||
layersCache: layersCache,
|
||||
pullOptions: pullOptions,
|
||||
stream: iss,
|
||||
|
||||
uncompressedTarSize: -1, // Will be computed later
|
||||
|
||||
layersCache: layersCache,
|
||||
copyBuffer: makeCopyBuffer(),
|
||||
fsVerityDigests: make(map[string]string),
|
||||
}, nil
|
||||
}
-func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
+// makeZstdChunkedDiffer sets up a chunkedDiffer for a zstd:chunked layer.
+// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert.
+func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions pullOptions) (*chunkedDiffer, error) {
 	manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations)
-	if err != nil {
+	if err != nil { // May be ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert
 		return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
 	}

 	var uncompressedTarSize int64 = -1
 	if tarSplit != nil {
 		uncompressedTarSize, err = tarSizeFromTarSplit(tarSplit)
 		if err != nil {
 			return nil, fmt.Errorf("computing size from tar-split: %w", err)
 		}
+	} else if !pullOptions.insecureAllowUnpredictableImageContents { // With no tar-split, we can't compute the traditional UncompressedDigest.
+		return nil, errFallbackCanConvert{
+			newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("zstd:chunked layers without tar-split data don't support partial pulls with guaranteed consistency with non-partial pulls")),
+		}
 	}

 	layersCache, err := getLayersCache(store)
@@ -273,25 +354,36 @@ func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest
 	}

 	return &chunkedDiffer{
-		fsVerityDigests:     make(map[string]string),
-		blobSize:            blobSize,
-		uncompressedTarSize: uncompressedTarSize,
-		tocDigest:           tocDigest,
-		copyBuffer:          makeCopyBuffer(),
-		fileType:            fileTypeZstdChunked,
-		layersCache:         layersCache,
-		manifest:            manifest,
-		toc:                 toc,
-		pullOptions:         pullOptions,
-		stream:              iss,
-		tarSplit:            tarSplit,
-		tocOffset:           tocOffset,
+		pullOptions: pullOptions,
+		stream:      iss,
+		blobSize:    blobSize,
+
+		fileType: fileTypeZstdChunked,
+
+		tocDigest:           tocDigest,
+		tocOffset:           tocOffset,
+		manifest:            manifest,
+		toc:                 toc,
+		tarSplit:            tarSplit,
+		uncompressedTarSize: uncompressedTarSize,
+
+		layersCache:     layersCache,
+		copyBuffer:      makeCopyBuffer(),
+		fsVerityDigests: make(map[string]string),
 	}, nil
 }
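For readers unfamiliar with tarSizeFromTarSplit: the uncompressed tar size can be recovered from tar-split metadata alone, because the metadata records raw segment bytes (headers, padding) plus the size of each file payload. A rough sketch using github.com/vbatts/tar-split, assuming an uncompressed JSON tar-split stream (the real helper in this package may differ in detail):

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/vbatts/tar-split/tar/storage"
)

// tarSizeFromTarSplitSketch sums segment bytes and recorded file payload
// sizes to recover the size of the original uncompressed tar stream.
func tarSizeFromTarSplitSketch(tarSplit []byte) (int64, error) {
	var size int64
	up := storage.NewJSONUnpacker(bytes.NewReader(tarSplit))
	for {
		entry, err := up.Next()
		if err != nil {
			if err == io.EOF {
				break
			}
			return 0, err
		}
		switch entry.Type {
		case storage.SegmentType:
			size += int64(len(entry.Payload)) // raw tar headers / padding
		case storage.FileType:
			size += entry.Size // file payload, stored out of band
		}
	}
	return size, nil
}

func main() { fmt.Println("see tarSizeFromTarSplitSketch") }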
-func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
+// makeEstargzChunkedDiffer sets up a chunkedDiffer for an estargz layer.
+// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert.
+func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, pullOptions pullOptions) (*chunkedDiffer, error) {
+	if !pullOptions.insecureAllowUnpredictableImageContents { // With no tar-split, we can't compute the traditional UncompressedDigest.
+		return nil, errFallbackCanConvert{
+			newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("estargz layers don't support partial pulls with guaranteed consistency with non-partial pulls")),
+		}
+	}
+
 	manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, tocDigest)
-	if err != nil {
+	if err != nil { // May be ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert
 		return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
 	}
 	layersCache, err := getLayersCache(store)
@@ -300,17 +392,20 @@ func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest dig
 	}

 	return &chunkedDiffer{
-		fsVerityDigests:     make(map[string]string),
-		blobSize:            blobSize,
-		uncompressedTarSize: -1, // We would have to read and decompress the whole layer
-		tocDigest:           tocDigest,
-		copyBuffer:          makeCopyBuffer(),
-		fileType:            fileTypeEstargz,
-		layersCache:         layersCache,
-		manifest:            manifest,
-		pullOptions:         pullOptions,
-		stream:              iss,
-		tocOffset:           tocOffset,
+		pullOptions: pullOptions,
+		stream:      iss,
+		blobSize:    blobSize,
+
+		fileType: fileTypeEstargz,
+
+		tocDigest:           tocDigest,
+		tocOffset:           tocOffset,
+		manifest:            manifest,
+		uncompressedTarSize: -1, // We would have to read and decompress the whole layer
+
+		layersCache:     layersCache,
+		copyBuffer:      makeCopyBuffer(),
+		fsVerityDigests: make(map[string]string),
 	}, nil
 }
@@ -391,7 +486,7 @@ func canDedupFileWithHardLink(file *fileMetadata, fd int, s os.FileInfo) bool {
 	}
 	// fill only the attributes used by canDedupMetadataWithHardLink.
 	otherFile := fileMetadata{
-		FileMetadata: internal.FileMetadata{
+		FileMetadata: minimal.FileMetadata{
 			UID:  int(st.Uid),
 			GID:  int(st.Gid),
 			Mode: int64(st.Mode),
@@ -735,7 +830,12 @@ func (d *destinationFile) Close() (Err error) {
 		}
 	}

-	return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false)
+	mode := os.FileMode(d.metadata.Mode)
+	if d.options.ForceMask != nil {
+		mode = *d.options.ForceMask
+	}
+
+	return setFileAttrs(d.dirfd, d.file, mode, d.metadata, d.options, false)
 }
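The change above makes ForceMask win over the mode recorded in the layer metadata. A worked example of just that override logic, reduced to plain Go with no storage types:

package main

import (
	"fmt"
	"os"
)

// effectiveMode mirrors the rewritten Close above: the metadata mode is used
// unless a ForceMask is configured, in which case the mask replaces it (the
// original mode is preserved separately, e.g. in an override xattr).
func effectiveMode(metadataMode int64, forceMask *os.FileMode) os.FileMode {
	mode := os.FileMode(metadataMode)
	if forceMask != nil {
		mode = *forceMask
	}
	return mode
}

func main() {
	mask := os.FileMode(0o700)
	fmt.Printf("%o\n", effectiveMode(0o644, nil))   // 644
	fmt.Printf("%o\n", effectiveMode(0o644, &mask)) // 700
}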
 func closeDestinationFiles(files chan *destinationFile, errors chan error) {
@@ -1038,13 +1138,6 @@ type hardLinkToCreate struct {
 	metadata *fileMetadata
 }

-func parseBooleanPullOption(pullOptions map[string]string, name string, def bool) bool {
-	if value, ok := pullOptions[name]; ok {
-		return strings.ToLower(value) == "true"
-	}
-	return def
-}
-
 type findAndCopyFileOptions struct {
 	useHardLinks bool
 	ostreeRepos  []string
@@ -1111,10 +1204,13 @@ func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *fileMetadata, copyOptions
 	return false, nil
 }
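A note on the parseBooleanPullOption helper deleted above: this change moves option parsing into a pullOptions struct populated up front, but the semantics stay the same — a storage.conf pull option is true iff its value is the string "true", case-insensitively. A standalone sketch of those semantics (the function body mirrors the removed code):

package main

import (
	"fmt"
	"strings"
)

// parseBool mirrors the removed parseBooleanPullOption: a missing key falls
// back to the provided default, and any value other than "true" is false.
func parseBool(pullOptions map[string]string, name string, def bool) bool {
	if value, ok := pullOptions[name]; ok {
		return strings.ToLower(value) == "true"
	}
	return def
}

func main() {
	opts := map[string]string{"enable_partial_images": "TRUE", "convert_images": "no"}
	fmt.Println(parseBool(opts, "enable_partial_images", false)) // true
	fmt.Println(parseBool(opts, "convert_images", false))        // false
	fmt.Println(parseBool(opts, "use_hard_links", true))         // true (default)
}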
-func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) {
+// makeEntriesFlat collects regular-file entries from mergedEntries, and produces a new list
+// where each file content is only represented once, and uses composefs.RegularFilePathForValidatedDigest for its name.
+// If flatPathNameMap is not nil, this function writes to it a mapping from filepath.Clean(originalName) to the composefs name.
+func makeEntriesFlat(mergedEntries []fileMetadata, flatPathNameMap map[string]string) ([]fileMetadata, error) {
 	var new []fileMetadata

-	hashes := make(map[string]string)
+	knownFlatPaths := make(map[string]struct{})
 	for i := range mergedEntries {
 		if mergedEntries[i].Type != TypeReg {
 			continue
@@ -1124,16 +1220,22 @@ func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) {
 		}
 		digest, err := digest.Parse(mergedEntries[i].Digest)
 		if err != nil {
-			return nil, err
+			return nil, fmt.Errorf("invalid digest %q for %q: %w", mergedEntries[i].Digest, mergedEntries[i].Name, err)
 		}
+		path, err := path.RegularFilePathForValidatedDigest(digest)
+		if err != nil {
+			return nil, fmt.Errorf("determining physical file path for %q: %w", mergedEntries[i].Name, err)
+		}
+		if flatPathNameMap != nil {
+			flatPathNameMap[filepath.Clean(mergedEntries[i].Name)] = path
+		}
-		d := digest.Encoded()
-
-		if hashes[d] != "" {
+		if _, known := knownFlatPaths[path]; known {
 			continue
 		}
-		hashes[d] = d
+		knownFlatPaths[path] = struct{}{}

-		mergedEntries[i].Name = fmt.Sprintf("%s/%s", d[0:2], d[2:])
+		mergedEntries[i].Name = path
 		mergedEntries[i].skipSetAttrs = true

 		new = append(new, mergedEntries[i])
@@ -1141,44 +1243,140 @@ func makeEntriesFlat(mergedEntries []fileMetadata, flatPathNameMap map[string]string) ([]fileMetadata, error) {
 	return new, nil
 }
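The path layout change here replaces the hand-rolled two-character prefix (d[0:2] + "/" + d[2:]) with composefs.RegularFilePathForValidatedDigest; both produce a fan-out directory keyed by the content digest. A minimal sketch of that scheme (the helper name below is hypothetical; it reproduces the shape of the removed code, not the composefs implementation):

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// flatPathForDigest shows the fan-out layout: the first two hex characters of
// the encoded digest become a directory, the remainder the file name.
func flatPathForDigest(d digest.Digest) string {
	e := d.Encoded()
	return fmt.Sprintf("%s/%s", e[0:2], e[2:])
}

func main() {
	d := digest.FromString("example")
	fmt.Println(flatPathForDigest(d)) // e.g. "50/d858e0…" — two-char dir, rest as file name
}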
-func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) {
-	var payload io.ReadCloser
-	var streams chan io.ReadCloser
-	var errs chan error
-	var err error
-
-	chunksToRequest := []ImageSourceChunk{
-		{
-			Offset: 0,
-			Length: uint64(c.blobSize),
-		},
-	}
-
-	streams, errs, err = c.stream.GetBlobAt(chunksToRequest)
+type streamOrErr struct {
+	stream io.ReadCloser
+	err    error
+}
+
+// ensureAllBlobsDone ensures that all blobs are closed and returns the first error encountered.
+func ensureAllBlobsDone(streamsOrErrors chan streamOrErr) (retErr error) {
+	for soe := range streamsOrErrors {
+		if soe.stream != nil {
+			_ = soe.stream.Close()
+		} else if retErr == nil {
+			retErr = soe.err
+		}
+	}
+	return
+}
+
+// getBlobAtConverterGoroutine reads from the streams and errs channels, then sends
+// either a stream or an error to the stream channel.  The streams channel is closed when
+// there are no more streams and errors to read.
+// It ensures that no more than maxStreams streams are returned, and that every item from the
+// streams and errs channels is consumed.
+func getBlobAtConverterGoroutine(stream chan streamOrErr, streams chan io.ReadCloser, errs chan error, maxStreams int) {
+	tooManyStreams := false
+	streamsSoFar := 0
+
+	err := errors.New("Unexpected error in getBlobAtGoroutine")
+
+	defer func() {
+		if err != nil {
+			stream <- streamOrErr{err: err}
+		}
+		close(stream)
+	}()
+
+loop:
+	for {
+		select {
+		case p, ok := <-streams:
+			if !ok {
+				streams = nil
+				break loop
+			}
+			if streamsSoFar >= maxStreams {
+				tooManyStreams = true
+				_ = p.Close()
+				continue
+			}
+			streamsSoFar++
+			stream <- streamOrErr{stream: p}
+		case err, ok := <-errs:
+			if !ok {
+				errs = nil
+				break loop
+			}
+			stream <- streamOrErr{err: err}
+		}
+	}
+	if streams != nil {
+		for p := range streams {
+			if streamsSoFar >= maxStreams {
+				tooManyStreams = true
+				_ = p.Close()
+				continue
+			}
+			streamsSoFar++
+			stream <- streamOrErr{stream: p}
+		}
+	}
+	if errs != nil {
+		for err := range errs {
+			stream <- streamOrErr{err: err}
+		}
+	}
+	if tooManyStreams {
+		stream <- streamOrErr{err: fmt.Errorf("too many streams returned, got more than %d", maxStreams)}
+	}
+	err = nil
+}
+
+// getBlobAt provides a much more convenient way to consume data returned by ImageSourceSeekable.GetBlobAt.
+// GetBlobAt returns two channels, forcing a caller to `select` on both of them — and in Go, reading a closed channel
+// always succeeds in select.
+// Instead, getBlobAt provides a single channel with all events, which can be consumed conveniently using `range`.
+func getBlobAt(is ImageSourceSeekable, chunksToRequest ...ImageSourceChunk) (chan streamOrErr, error) {
+	streams, errs, err := is.GetBlobAt(chunksToRequest)
+	if err != nil {
+		return nil, err
+	}
+	stream := make(chan streamOrErr)
+	go getBlobAtConverterGoroutine(stream, streams, errs, len(chunksToRequest))
+	return stream, nil
+}
+
+func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) {
+	streamsOrErrors, err := getBlobAt(c.stream, ImageSourceChunk{Offset: 0, Length: uint64(c.blobSize)})
 	if err != nil {
 		return "", err
 	}
-	select {
-	case p := <-streams:
-		payload = p
-	case err := <-errs:
-		return "", err
-	}
-	if payload == nil {
-		return "", errors.New("invalid stream returned")
-	}
-	defer payload.Close()

 	originalRawDigester := digest.Canonical.Digester()
-
-	r := io.TeeReader(payload, originalRawDigester.Hash())
-
-	// copy the entire tarball and compute its digest
-	_, err = io.CopyBuffer(destination, r, c.copyBuffer)
+	for soe := range streamsOrErrors {
+		if soe.stream != nil {
+			r := io.TeeReader(soe.stream, originalRawDigester.Hash())

+			// copy the entire tarball and compute its digest
+			_, err = io.CopyBuffer(destination, r, c.copyBuffer)
+			_ = soe.stream.Close()
+		}
+		if soe.err != nil && err == nil {
+			err = soe.err
+		}
+	}
 	return originalRawDigester.Digest(), err
 }
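The single-channel shape is the whole point: consumption becomes a plain range loop instead of a two-channel select. A self-contained sketch of the consumer side (the streamOrErr type here is a local copy; the channel is filled by hand to stand in for what getBlobAt would deliver):

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

type streamOrErr struct {
	stream io.ReadCloser
	err    error
}

func main() {
	// Simulate getBlobAt's output: one stream per requested chunk,
	// then the channel is closed by the converter goroutine.
	ch := make(chan streamOrErr, 1)
	ch <- streamOrErr{stream: io.NopCloser(strings.NewReader("chunk data"))}
	close(ch)

	for soe := range ch {
		if soe.err != nil {
			fmt.Fprintln(os.Stderr, "chunk error:", soe.err)
			continue
		}
		_, _ = io.Copy(os.Stdout, soe.stream)
		_ = soe.stream.Close()
	}
}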
+func typeToOsMode(typ string) (os.FileMode, error) {
+	switch typ {
+	case TypeReg, TypeLink:
+		return 0, nil
+	case TypeSymlink:
+		return os.ModeSymlink, nil
+	case TypeDir:
+		return os.ModeDir, nil
+	case TypeChar:
+		return os.ModeDevice | os.ModeCharDevice, nil
+	case TypeBlock:
+		return os.ModeDevice, nil
+	case TypeFifo:
+		return os.ModeNamedPipe, nil
+	}
+	return 0, fmt.Errorf("unknown file type %q", typ)
+}
 func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, differOpts *graphdriver.DifferOptions) (graphdriver.DriverWithDifferOutput, error) {
 	defer c.layersCache.release()
 	defer func() {
@@ -1298,13 +1496,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		Size: c.uncompressedTarSize,
 	}

-	// When the hard links deduplication is used, file attributes are ignored because setting them
-	// modifies the source file as well.
-	useHardLinks := parseBooleanPullOption(c.pullOptions, "use_hard_links", false)
-
-	// List of OSTree repositories to use for deduplication
-	ostreeRepos := strings.Split(c.pullOptions["ostree_repos"], ":")
-
 	whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)

 	var missingParts []missingPart
@@ -1325,7 +1516,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	if err == nil {
 		value := idtools.Stat{
 			IDs:  idtools.IDPair{UID: int(uid), GID: int(gid)},
-			Mode: os.FileMode(mode),
+			Mode: os.ModeDir | os.FileMode(mode),
 		}
 		if err := idtools.SetContainersOverrideXattr(dest, value); err != nil {
 			return output, err
@@ -1337,16 +1528,20 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	if err != nil {
 		return output, &fs.PathError{Op: "open", Path: dest, Err: err}
 	}
-	defer unix.Close(dirfd)
+	dirFile := os.NewFile(uintptr(dirfd), dest)
+	defer dirFile.Close()

+	var flatPathNameMap map[string]string // = nil
 	if differOpts != nil && differOpts.Format == graphdriver.DifferOutputFormatFlat {
-		mergedEntries, err = makeEntriesFlat(mergedEntries)
+		flatPathNameMap = map[string]string{}
+		mergedEntries, err = makeEntriesFlat(mergedEntries, flatPathNameMap)
 		if err != nil {
 			return output, err
 		}
 		createdDirs := make(map[string]struct{})
 		for _, e := range mergedEntries {
-			d := e.Name[0:2]
+			// This hard-codes an assumption that RegularFilePathForValidatedDigest creates paths with exactly one directory component.
+			d := filepath.Dir(e.Name)
 			if _, found := createdDirs[d]; !found {
 				if err := unix.Mkdirat(dirfd, d, 0o755); err != nil {
 					return output, &fs.PathError{Op: "mkdirat", Path: d, Err: err}
@@ -1363,8 +1558,10 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	missingPartsSize, totalChunksSize := int64(0), int64(0)

 	copyOptions := findAndCopyFileOptions{
-		useHardLinks: useHardLinks,
-		ostreeRepos:  ostreeRepos,
+		// When the hard links deduplication is used, file attributes are ignored because setting them
+		// modifies the source file as well.
+		useHardLinks: c.pullOptions.useHardLinks,
+		ostreeRepos:  c.pullOptions.ostreeRepos, // List of OSTree repositories to use for deduplication
 		options:      options,
 	}
@@ -1408,13 +1605,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	filesToWaitFor := 0
 	for i := range mergedEntries {
 		r := &mergedEntries[i]
-		if options.ForceMask != nil {
-			value := idtools.FormatContainersOverrideXattr(r.UID, r.GID, int(r.Mode))
-			if r.Xattrs == nil {
-				r.Xattrs = make(map[string]string)
-			}
-			r.Xattrs[idtools.ContainersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
-		}

 		mode := os.FileMode(r.Mode)

@@ -1423,10 +1613,37 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 			return output, err
 		}

+		size := r.Size
+
+		// update also the implementation of ForceMask in pkg/archive
+		if options.ForceMask != nil {
+			mode = *options.ForceMask
+
+			// special files will be stored as regular files
+			if t != tar.TypeDir && t != tar.TypeSymlink && t != tar.TypeReg && t != tar.TypeLink {
+				t = tar.TypeReg
+				size = 0
+			}
+
+			// if the entry will be stored as a directory or a regular file, store in a xattr the original
+			// owner and mode.
+			if t == tar.TypeDir || t == tar.TypeReg {
+				typeMode, err := typeToOsMode(r.Type)
+				if err != nil {
+					return output, err
+				}
+				value := idtools.FormatContainersOverrideXattrDevice(r.UID, r.GID, typeMode|fs.FileMode(r.Mode), int(r.Devmajor), int(r.Devminor))
+				if r.Xattrs == nil {
+					r.Xattrs = make(map[string]string)
+				}
+				r.Xattrs[idtools.ContainersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
+			}
+		}
+
-		r.Name = filepath.Clean(r.Name)
-		r.Linkname = filepath.Clean(r.Linkname)
+		r.Name = path.CleanAbsPath(r.Name)
+		// do not modify the value of symlinks
+		if r.Linkname != "" && t != tar.TypeSymlink {
+			r.Linkname = path.CleanAbsPath(r.Linkname)
+		}

 		if whiteoutConverter != nil {
@@ -1434,8 +1651,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 				Typeflag: t,
 				Name:     r.Name,
 				Linkname: r.Linkname,
-				Size:     r.Size,
-				Mode:     r.Mode,
+				Size:     size,
+				Mode:     int64(mode),
 				Uid:      r.UID,
 				Gid:      r.GID,
 			}
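The ForceMask rework above has one non-obvious consequence: device nodes, pipes, and sockets are materialized as empty regular files, with their real identity preserved out of band in the override xattr. A small self-contained sketch of just that type-remapping decision (plain archive/tar constants, no storage types):

package main

import (
	"archive/tar"
	"fmt"
)

// underForceMask mirrors the rewrite above: with ForceMask set, anything that
// is not a dir/symlink/regular/hardlink entry becomes an empty regular file;
// the original type, owner, and mode go into an xattr instead.
func underForceMask(t byte, size int64) (byte, int64) {
	if t != tar.TypeDir && t != tar.TypeSymlink && t != tar.TypeReg && t != tar.TypeLink {
		return tar.TypeReg, 0
	}
	return t, size
}

func main() {
	t, size := underForceMask(tar.TypeChar, 0)
	fmt.Println(t == tar.TypeReg, size) // true 0
}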
@@ -1454,7 +1671,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		switch t {
 		case tar.TypeReg:
 			// Create directly empty files.
-			if r.Size == 0 {
+			if size == 0 {
 				// Used to have a scope for cleanup.
 				createEmptyFile := func() error {
 					file, err := openFileUnderRoot(dirfd, r.Name, newFileFlags, 0)
@@ -1474,7 +1691,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 			}

 		case tar.TypeDir:
-			if r.Name == "" || r.Name == "." {
+			if r.Name == "/" {
 				output.RootDirMode = &mode
 			}
 			if err := safeMkdir(dirfd, mode, r.Name, r, options); err != nil {
@@ -1509,7 +1726,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 			return output, fmt.Errorf("invalid type %q", t)
 		}

-		totalChunksSize += r.Size
+		totalChunksSize += size

 		if t == tar.TypeReg {
 			index := i
@@ -1572,7 +1789,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		}

 		switch chunk.ChunkType {
-		case internal.ChunkTypeData:
+		case minimal.ChunkTypeData:
 			root, path, offset, err := c.layersCache.findChunkInOtherLayers(chunk)
 			if err != nil {
 				return output, err
@@ -1585,7 +1802,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 					Offset: offset,
 				}
 			}
-		case internal.ChunkTypeZeros:
+		case minimal.ChunkTypeZeros:
 			missingPartsSize -= size
 			mp.Hole = true
 			// Mark all chunks belonging to the missing part as holes
@@ -1609,6 +1826,39 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		}
 	}
+	// To ensure that consumers of the layer who decompress and read the full tar stream,
+	// and consumers who consume the data via the TOC, both see exactly the same data and metadata,
+	// compute the UncompressedDigest.
+	// c/image will then ensure that this value matches the value in the image config’s RootFS.DiffID, i.e. the image must commit
+	// to one UncompressedDigest value for each layer, and that will avoid the ambiguity (in consumers who validate layers against DiffID).
+	//
+	// c/image also uses the UncompressedDigest as a layer ID, allowing it to use the traditional layer and image IDs.
+	//
+	// This is, sadly, quite costly: Up to now we might have had to write, and digest, only the new/modified files.
+	// Here we need to read, and digest, the whole layer, even if almost all of it was already present locally previously.
+	// So, really specialized (EXTREMELY RARE) users can opt out of this check using insecureAllowUnpredictableImageContents.
+	//
+	// Layers without a tar-split (estargz layers and old zstd:chunked layers) can't produce an UncompressedDigest that
+	// matches the expected RootFS.DiffID; we always fall back to full pulls, again unless the user opts out
+	// via insecureAllowUnpredictableImageContents.
+	if output.UncompressedDigest == "" {
+		switch {
+		case c.pullOptions.insecureAllowUnpredictableImageContents:
+			// Oh well.  Skip the costly digest computation.
+		case output.TarSplit != nil:
+			metadata := tsStorage.NewJSONUnpacker(bytes.NewReader(output.TarSplit))
+			fg := newStagedFileGetter(dirFile, flatPathNameMap)
+			digester := digest.Canonical.Digester()
+			if err := asm.WriteOutputTarStream(fg, metadata, digester.Hash()); err != nil {
+				return output, fmt.Errorf("digesting staged uncompressed stream: %w", err)
+			}
+			output.UncompressedDigest = digester.Digest()
+		default:
+			// We are checking for this earlier in GetDiffer, so this should not be reachable.
+			return output, fmt.Errorf(`internal error: layer's UncompressedDigest is unknown and "insecure_allow_unpredictable_image_contents" is not set`)
+		}
+	}

 	if totalChunksSize > 0 {
 		logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
 	}
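The UncompressedDigest recomputation above reassembles the uncompressed tar stream from tar-split metadata plus the staged files, and digests it without ever writing the tar to disk. Roughly, using github.com/vbatts/tar-split (the FileGetter below is a simplified stand-in for newStagedFileGetter, which additionally uses securejoin and the flat-path map):

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"path/filepath"

	digest "github.com/opencontainers/go-digest"
	"github.com/vbatts/tar-split/tar/asm"
	tsStorage "github.com/vbatts/tar-split/tar/storage"
)

// dirFileGetter resolves tar entry names to files under root.
type dirFileGetter struct{ root string }

func (g dirFileGetter) Get(name string) (io.ReadCloser, error) {
	return os.Open(filepath.Join(g.root, filepath.Clean(name)))
}

// diffIDFromTarSplit rebuilds the tar stream (headers from tar-split,
// payloads from the staged files) and digests it on the fly.
func diffIDFromTarSplit(tarSplit []byte, root string) (digest.Digest, error) {
	unpacker := tsStorage.NewJSONUnpacker(bytes.NewReader(tarSplit))
	digester := digest.Canonical.Digester()
	if err := asm.WriteOutputTarStream(dirFileGetter{root: root}, unpacker, digester.Hash()); err != nil {
		return "", fmt.Errorf("digesting staged uncompressed stream: %w", err)
	}
	return digester.Digest(), nil
}

func main() { fmt.Println("see diffIDFromTarSplit") }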
@@ -1618,7 +1868,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	return output, nil
 }

-func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
+func mustSkipFile(fileType compressedFileType, e minimal.FileMetadata) bool {
 	// ignore the metadata files for the estargz format.
 	if fileType != fileTypeEstargz {
 		return false
@@ -1631,7 +1881,7 @@ func mustSkipFile(fileType compressedFileType, e minimal.FileMetadata) bool {
 	return false
 }

-func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, error) {
+func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []minimal.FileMetadata) ([]fileMetadata, error) {
 	countNextChunks := func(start int) int {
 		count := 0
 		for _, e := range entries[start:] {
@@ -1668,7 +1918,7 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
 		if e.Type == TypeReg {
 			nChunks := countNextChunks(i + 1)

-			e.chunks = make([]*internal.FileMetadata, nChunks+1)
+			e.chunks = make([]*minimal.FileMetadata, nChunks+1)
 			for j := 0; j <= nChunks; j++ {
 				// we need a copy here, otherwise we override the
 				// .Size later
@@ -1703,7 +1953,7 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i

 // validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the
 // same digest as chunk.ChunkDigest
-func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool {
+func validateChunkChecksum(chunk *minimal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool {
 	parentDirfd, err := unix.Open(root, unix.O_PATH|unix.O_CLOEXEC, 0)
 	if err != nil {
 		return false
@@ -1734,3 +1984,33 @@ func validateChunkChecksum(chunk *minimal.FileMetadata, root, path string, offs

 	return digester.Digest() == digest
 }
+// newStagedFileGetter returns an object usable as storage.FileGetter for rootDir.
+// if flatPathNameMap is not nil, it must be used to map logical file names into the backing file paths.
+func newStagedFileGetter(rootDir *os.File, flatPathNameMap map[string]string) *stagedFileGetter {
+	return &stagedFileGetter{
+		rootDir:         rootDir,
+		flatPathNameMap: flatPathNameMap,
+	}
+}
+
+type stagedFileGetter struct {
+	rootDir         *os.File
+	flatPathNameMap map[string]string // nil, or a map from filepath.Clean()ed tar file names to expected on-filesystem names
+}
+
+func (fg *stagedFileGetter) Get(filename string) (io.ReadCloser, error) {
+	if fg.flatPathNameMap != nil {
+		path, ok := fg.flatPathNameMap[filepath.Clean(filename)]
+		if !ok {
+			return nil, fmt.Errorf("no path mapping exists for tar entry %q", filename)
+		}
+		filename = path
+	}
+	pathFD, err := securejoin.OpenatInRoot(fg.rootDir, filename)
+	if err != nil {
+		return nil, err
+	}
+	defer pathFD.Close()
+	return securejoin.Reopen(pathFD, unix.O_RDONLY)
+}
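stagedFileGetter leans on filepath-securejoin so that a tar entry name can never escape the staging root: first an O_PATH-style handle is resolved strictly inside the root, then it is reopened readable. The same two-step pattern in isolation (a hedged sketch; exact securejoin behavior depends on the library version and platform):

package main

import (
	"fmt"
	"io"
	"os"

	securejoin "github.com/cyphar/filepath-securejoin"
	"golang.org/x/sys/unix"
)

// openInRoot opens name strictly inside root, following the same pattern as
// stagedFileGetter.Get above.
func openInRoot(root *os.File, name string) (io.ReadCloser, error) {
	pathFD, err := securejoin.OpenatInRoot(root, name)
	if err != nil {
		return nil, err
	}
	defer pathFD.Close()
	return securejoin.Reopen(pathFD, unix.O_RDONLY)
}

func main() {
	root, err := os.Open("/tmp")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer root.Close()
	if f, err := openInRoot(root, "some-file"); err == nil {
		defer f.Close()
	}
}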
vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go (generated, vendored; 2 changed lines)

@@ -13,5 +13,5 @@ import (

 // GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
 func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
-	return nil, errors.New("format not supported on this system")
+	return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("format not supported on this system"))
 }
vendor/github.com/containers/storage/pkg/chunked/toc/toc.go (generated, vendored; 4 changed lines)

@@ -3,7 +3,7 @@ package toc
 import (
 	"errors"

-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
 	digest "github.com/opencontainers/go-digest"
 )

@@ -19,7 +19,7 @@ const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
 // This is an experimental feature and may be changed/removed in the future.
 func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
 	d1, ok1 := annotations[tocJSONDigestAnnotation]
-	d2, ok2 := annotations[internal.ManifestChecksumKey]
+	d2, ok2 := annotations[minimal.ManifestChecksumKey]
 	switch {
 	case ok1 && ok2:
 		return nil, errors.New("both zstd:chunked and eStargz TOC found")
vendor/github.com/containers/storage/pkg/idtools/idtools.go (generated, vendored; 177 changed lines)

@@ -4,6 +4,7 @@ import (
 	"bufio"
 	"errors"
 	"fmt"
+	"io/fs"
 	"os"
 	"os/user"
 	"runtime"
@@ -369,27 +370,66 @@ func checkChownErr(err error, name string, uid, gid int) error {

 // Stat contains file states that can be overridden with ContainersOverrideXattr.
 type Stat struct {
-	IDs  IDPair
-	Mode os.FileMode
+	IDs   IDPair
+	Mode  os.FileMode
+	Major int
+	Minor int
 }

 // FormatContainersOverrideXattr will format the given uid, gid, and mode into a string
 // that can be used as the value for the ContainersOverrideXattr xattr.
 func FormatContainersOverrideXattr(uid, gid, mode int) string {
-	return fmt.Sprintf("%d:%d:0%o", uid, gid, mode&0o7777)
+	return FormatContainersOverrideXattrDevice(uid, gid, fs.FileMode(mode), 0, 0)
 }

+// FormatContainersOverrideXattrDevice will format the given uid, gid, and mode into a string
+// that can be used as the value for the ContainersOverrideXattr xattr.  For devices, it also
+// needs the major and minor numbers.
+func FormatContainersOverrideXattrDevice(uid, gid int, mode fs.FileMode, major, minor int) string {
+	typ := ""
+	switch mode & os.ModeType {
+	case os.ModeDir:
+		typ = "dir"
+	case os.ModeSymlink:
+		typ = "symlink"
+	case os.ModeNamedPipe:
+		typ = "pipe"
+	case os.ModeSocket:
+		typ = "socket"
+	case os.ModeDevice:
+		typ = fmt.Sprintf("block-%d-%d", major, minor)
+	case os.ModeDevice | os.ModeCharDevice:
+		typ = fmt.Sprintf("char-%d-%d", major, minor)
+	default:
+		typ = "file"
+	}
+	unixMode := mode & os.ModePerm
+	if mode&os.ModeSetuid != 0 {
+		unixMode |= 0o4000
+	}
+	if mode&os.ModeSetgid != 0 {
+		unixMode |= 0o2000
+	}
+	if mode&os.ModeSticky != 0 {
+		unixMode |= 0o1000
+	}
+	return fmt.Sprintf("%d:%d:%04o:%s", uid, gid, unixMode, typ)
+}
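Following the formatting code above, the new value is "uid:gid:mode:type", where the type field carries major/minor numbers for devices; older writers using FormatContainersOverrideXattr produce only the first three fields, which is why the parser below accepts both shapes. A worked example for a character device 1:3 owned by root with permissions 0644:

package main

import (
	"fmt"
	"os"
)

func main() {
	uid, gid := 0, 0
	mode := os.FileMode(0o644)
	// Same fmt verb as the formatting code above: 4-digit octal mode.
	fmt.Printf("%d:%d:%04o:%s\n", uid, gid, mode&os.ModePerm, "char-1-3")
	// Output: 0:0:0644:char-1-3
}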
 // GetContainersOverrideXattr will get and decode ContainersOverrideXattr.
 func GetContainersOverrideXattr(path string) (Stat, error) {
-	var stat Stat
 	xstat, err := system.Lgetxattr(path, ContainersOverrideXattr)
 	if err != nil {
-		return stat, err
+		return Stat{}, err
 	}
+	return parseOverrideXattr(xstat) // This will fail if (xstat, err) == (nil, nil), i.e. the xattr does not exist.
+}

+func parseOverrideXattr(xstat []byte) (Stat, error) {
+	var stat Stat
 	attrs := strings.Split(string(xstat), ":")
-	if len(attrs) != 3 {
-		return stat, fmt.Errorf("The number of clons in %s does not equal to 3",
+	if len(attrs) < 3 {
+		return stat, fmt.Errorf("The number of parts in %s is less than 3",
 			ContainersOverrideXattr)
 	}

@@ -397,47 +437,105 @@ func GetContainersOverrideXattr(path string) (Stat, error) {
 	if err != nil {
 		return stat, fmt.Errorf("Failed to parse UID: %w", err)
 	}

 	stat.IDs.UID = int(value)

-	value, err = strconv.ParseUint(attrs[0], 10, 32)
+	value, err = strconv.ParseUint(attrs[1], 10, 32)
 	if err != nil {
 		return stat, fmt.Errorf("Failed to parse GID: %w", err)
 	}

 	stat.IDs.GID = int(value)

 	value, err = strconv.ParseUint(attrs[2], 8, 32)
 	if err != nil {
 		return stat, fmt.Errorf("Failed to parse mode: %w", err)
 	}
-	stat.Mode = os.FileMode(value)
+	stat.Mode = os.FileMode(value) & os.ModePerm
+	if value&0o1000 != 0 {
+		stat.Mode |= os.ModeSticky
+	}
+	if value&0o2000 != 0 {
+		stat.Mode |= os.ModeSetgid
+	}
+	if value&0o4000 != 0 {
+		stat.Mode |= os.ModeSetuid
+	}

+	if len(attrs) > 3 {
+		typ := attrs[3]
+		if strings.HasPrefix(typ, "file") {
+		} else if strings.HasPrefix(typ, "dir") {
+			stat.Mode |= os.ModeDir
+		} else if strings.HasPrefix(typ, "symlink") {
+			stat.Mode |= os.ModeSymlink
+		} else if strings.HasPrefix(typ, "pipe") {
+			stat.Mode |= os.ModeNamedPipe
+		} else if strings.HasPrefix(typ, "socket") {
+			stat.Mode |= os.ModeSocket
+		} else if strings.HasPrefix(typ, "block") {
+			stat.Mode |= os.ModeDevice
+			stat.Major, stat.Minor, err = parseDevice(typ)
+			if err != nil {
+				return stat, err
+			}
+		} else if strings.HasPrefix(typ, "char") {
+			stat.Mode |= os.ModeDevice | os.ModeCharDevice
+			stat.Major, stat.Minor, err = parseDevice(typ)
+			if err != nil {
+				return stat, err
+			}
+		} else {
+			return stat, fmt.Errorf("Invalid file type %s", typ)
+		}
+	}
 	return stat, nil
 }
+func parseDevice(typ string) (int, int, error) {
+	parts := strings.Split(typ, "-")
+	// If there are more than 3 parts, just ignore them to be forward compatible
+	if len(parts) < 3 {
+		return 0, 0, fmt.Errorf("Invalid device type %s", typ)
+	}
+	if parts[0] != "block" && parts[0] != "char" {
+		return 0, 0, fmt.Errorf("Invalid device type %s", typ)
+	}
+	major, err := strconv.Atoi(parts[1])
+	if err != nil {
+		return 0, 0, fmt.Errorf("Failed to parse major number: %w", err)
+	}
+	minor, err := strconv.Atoi(parts[2])
+	if err != nil {
+		return 0, 0, fmt.Errorf("Failed to parse minor number: %w", err)
+	}
+	return major, minor, nil
+}
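Decoding is the mirror image of the device formatting shown earlier: per the code above, "char-1-3" splits on "-" into its kind plus major 1 and minor 3, and anything with fewer than three parts is rejected. In isolation:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	typ := "char-1-3"
	parts := strings.Split(typ, "-") // ["char", "1", "3"]
	major, _ := strconv.Atoi(parts[1])
	minor, _ := strconv.Atoi(parts[2])
	fmt.Println(major, minor) // 1 3
}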
 // SetContainersOverrideXattr will encode and set ContainersOverrideXattr.
 func SetContainersOverrideXattr(path string, stat Stat) error {
-	value := FormatContainersOverrideXattr(stat.IDs.UID, stat.IDs.GID, int(stat.Mode))
+	value := FormatContainersOverrideXattrDevice(stat.IDs.UID, stat.IDs.GID, stat.Mode, stat.Major, stat.Minor)
 	return system.Lsetxattr(path, ContainersOverrideXattr, []byte(value), 0)
 }
 func SafeChown(name string, uid, gid int) error {
 	if runtime.GOOS == "darwin" {
-		var mode os.FileMode = 0o0700
-		xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
-		if err == nil {
-			attrs := strings.Split(string(xstat), ":")
-			if len(attrs) == 3 {
-				val, err := strconv.ParseUint(attrs[2], 8, 32)
-				if err == nil {
-					mode = os.FileMode(val)
-				}
-			}
+		stat := Stat{
+			Mode: os.FileMode(0o0700),
 		}
-		value := Stat{IDPair{uid, gid}, mode}
-		if err = SetContainersOverrideXattr(name, value); err != nil {
+		xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
+		if err == nil && xstat != nil {
+			stat, err = parseOverrideXattr(xstat)
+			if err != nil {
+				return err
+			}
+		} else {
+			st, err := os.Stat(name) // Ideally we would share this with system.Stat below, but then we would need to convert Mode.
+			if err != nil {
+				return err
+			}
+			stat.Mode = st.Mode()
+		}
+		stat.IDs = IDPair{UID: uid, GID: gid}
+		if err = SetContainersOverrideXattr(name, stat); err != nil {
 			return err
 		}
 		uid = os.Getuid()
@@ -453,19 +551,24 @@ func SafeChown(name string, uid, gid int) error {

 func SafeLchown(name string, uid, gid int) error {
 	if runtime.GOOS == "darwin" {
-		var mode os.FileMode = 0o0700
-		xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
-		if err == nil {
-			attrs := strings.Split(string(xstat), ":")
-			if len(attrs) == 3 {
-				val, err := strconv.ParseUint(attrs[2], 8, 32)
-				if err == nil {
-					mode = os.FileMode(val)
-				}
-			}
+		stat := Stat{
+			Mode: os.FileMode(0o0700),
 		}
-		value := Stat{IDPair{uid, gid}, mode}
-		if err = SetContainersOverrideXattr(name, value); err != nil {
+		xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
+		if err == nil && xstat != nil {
+			stat, err = parseOverrideXattr(xstat)
+			if err != nil {
+				return err
+			}
+		} else {
+			st, err := os.Lstat(name) // Ideally we would share this with system.Stat below, but then we would need to convert Mode.
+			if err != nil {
+				return err
+			}
+			stat.Mode = st.Mode()
+		}
+		stat.IDs = IDPair{UID: uid, GID: gid}
+		if err = SetContainersOverrideXattr(name, stat); err != nil {
 			return err
 		}
 		uid = os.Getuid()
vendor/github.com/containers/storage/pkg/ioutils/writers.go (generated, vendored; 4 changed lines)

@@ -36,9 +36,9 @@ func (r *writeCloserWrapper) Close() error {
 }

 // NewWriteCloserWrapper returns a new io.WriteCloser.
-func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
+func NewWriteCloserWrapper(w io.Writer, closer func() error) io.WriteCloser {
 	return &writeCloserWrapper{
-		Writer: r,
+		Writer: w,
 		closer: closer,
 	}
 }
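The rename is purely cosmetic — the parameter is a writer, not a reader — and usage is unchanged. For instance, to give a plain bytes.Buffer a Close hook:

package main

import (
	"bytes"
	"fmt"

	"github.com/containers/storage/pkg/ioutils"
)

func main() {
	var buf bytes.Buffer
	wc := ioutils.NewWriteCloserWrapper(&buf, func() error {
		fmt.Println("flushed", buf.Len(), "bytes")
		return nil
	})
	_, _ = wc.Write([]byte("hello"))
	_ = wc.Close() // prints: flushed 5 bytes
}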
Some files were not shown because too many files have changed in this diff.