Mirror of https://github.com/containers/skopeo.git (synced 2025-04-27 19:05:32 +00:00)

Compare commits — 172 commits:

bb26ea90f9, 4c55fce106, ee84e9ae0a, 5cf5a11e04, 47bf2b4ef7, 320a3e0775, 9bf0c3bf7c, ed34be71c6,
32b8827d78, 3755a3db63, 44877b3af2, f4d30802b2, d66183b129, b74989dfbc, 25481e1a88, 990ac07ff5,
53a1b69591, c5ccf21893, f9e03e6c04, b39bf85a1a, a6ff545f30, 4920d7f172, c7dca2e3ac, 5b135b92a8,
119816b17c, f5c34db79d, ddc2848b66, b6b6415286, 610f30db60, f7ab0ed03a, ecdf380b0c, 0e68f7bebd,
c6d7c5916a, edfeb73504, f37a82cddf, ba2f8b7ace, 11fc49b491, b78fa41e42, e8d9f916e0, da1bf9f7e3,
5b0b0d3954, 5b30cfe29c, d62cbd6178, 7983f20adb, 8d000f4522, 1f49b2c0c0, f745bb46bc, a31470d7a4,
ec21960402, 1a38d97653, 115f3727e8, 49569bcf69, e7e09255b4, dd71592115, 9c0f31dcce, 9fda7e7304,
a0799484c8, 603d37c588, a51828764c, cb7a78c860, b8637922e2, 48abc39c54, 56ccf09c68, ceabd93f4a,
03fa889da5, c87f3aeaac, 15132e6c1e, e7fe80b5df, 4080a631b1, b88f8eccaa, cc743c3c0f, a88629df99,
91c6aa613d, 9d3331be7b, d00ea33dfa, 0f5d87a9c0, 7bd0dc216f, 5d0807795d, 547141ce57, 1de2d3bad9,
a185498c73, 070f2bcfaf, fad5a31a42, 41e4b1b7ac, 5744b9b49d, e9340fbc69, 934ea727a4, 22ab1a3717,
8a44fe6c8b, 2d79fec20c, c7aaed7397, 9d73060a2e, 0283441cf1, 5c968d67b8, 690b1ef3e3, 841a1b61ae,
8c78c03347, 8c2eff1dae, a5916e63cc, f88186e688, ff462b3dad, a3ffb772e1, 8c20592d78, 44d0f7103a,
20746ae273, 64361bde06, 955233070b, bd1ac4668f, 2b3701b7be, 59ec554738, 83d0e76b83, 04d65888a3,
958d586c45, 137a912c55, dbf619c027, 52895bc6cd, d18677e479, b78a415987, 0eb4ad2f54, 5eba061489,
9764c99dbd, 54d235403a, a81cb65fac, 7d6169d219, 85fa4dff42, 8380f284c7, ed0efc6932, 3315da48b5,
ab53f64473, c737e5daea, 653db36664, 0633de6350, 6483de4894, 3d2c80f58f, b5a13bccfd, eae9e8862c,
392aa3f70f, b68afb1a73, 0eb0ac3707, 2a47dff7e1, 631555fc83, 83efeea4f3, da5c8d6e7b, deda96636b,
17ea741413, fb777d3906, bf5987896a, 568d5d1c6f, f333a897f6, 1866ecbda2, f7801f77cc, b116c5bdce,
e288827449, 27baed919d, 4863e05f5f, a71a8b4c48, 4541d649a6, 96f3804385, be26f2eb2f, e9755957df,
74488c4b86, 2a3c8ee5b1, 87f199fbaf, f423f01d1b, c5eaf49918, 186e9b4f0b, 8896960688, f818827f6d,
0c25d2c9fb, bae8ccd7fb, 181429435e, 293ac065b7

@@ -21,7 +21,7 @@ env:
SCRIPT_BASE: "./contrib/cirrus"

# Google-cloud VM Images
IMAGE_SUFFIX: "c20241107t210000z-f41f40d13"
IMAGE_SUFFIX: "c20250324t111922z-f41f40d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"

# Container FQIN's
@@ -62,7 +62,7 @@ doccheck_task:
cpu: 4
memory: 8
env:
BUILDTAGS: &withopengpg 'btrfs_noversion libdm_no_deferred_remove containers_image_openpgp'
BUILDTAGS: &withopengpg 'containers_image_openpgp'
script: |
# TODO: Can't use 'runner.sh setup' inside container. However,
# removing the pre-installed package is the only necessary step
@@ -194,7 +194,7 @@ test_skopeo_task:
matrix:
- name: "Skopeo Test" # N/B: Name ref. by hack/get_fqin.sh
env:
BUILDTAGS: 'btrfs_noversion libdm_no_deferred_remove'
BUILDTAGS: ''
- name: "Skopeo Test w/ opengpg"
env:
BUILDTAGS: *withopengpg

.fmf/version (new file, 1 line)
@@ -0,0 +1 @@
1

.github/workflows/check_cirrus_cron.yml (vendored, 7 lines changed)
@@ -17,4 +17,9 @@ jobs:
# Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
call_cron_failures:
uses: containers/podman/.github/workflows/check_cirrus_cron.yml@main
secrets: inherit
secrets:
SECRET_CIRRUS_API_KEY: ${{secrets.SECRET_CIRRUS_API_KEY}}
ACTION_MAIL_SERVER: ${{secrets.ACTION_MAIL_SERVER}}
ACTION_MAIL_USERNAME: ${{secrets.ACTION_MAIL_USERNAME}}
ACTION_MAIL_PASSWORD: ${{secrets.ACTION_MAIL_PASSWORD}}
ACTION_MAIL_SENDER: ${{secrets.ACTION_MAIL_SENDER}}

.github/workflows/rerun_cirrus_cron.yml (vendored, 19 lines)
@@ -1,19 +0,0 @@
---

# See also: https://github.com/containers/podman/blob/main/.github/workflows/rerun_cirrus_cron.yml

on:
# Note: This only applies to the default branch.
schedule:
# N/B: This should correspond to a period slightly after
# the last job finishes running. See job defs. at:
# https://cirrus-ci.com/settings/repository/6706677464432640
- cron: '01 01 * * 1-5'
# Debug: Allow triggering job manually in github-actions WebUI
workflow_dispatch: {}

jobs:
# Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
call_cron_rerun:
uses: containers/podman/.github/workflows/rerun_cirrus_cron.yml@main
secrets: inherit

@@ -1,3 +1,13 @@
---
run:
timeout: 5m
version: "2"
linters:
settings:
staticcheck:
checks:
# Compared to golangci-lint v2.0.2 defaults, we don’t exclude
# ST1003, ST1016, ST1020, ST1021, ST1022 as we don't hit those.
- all
- -ST1000 # Incorrect or missing package comment.
- -ST1005 # Incorrectly formatted error string.
exclusions:
presets:
- std-error-handling

.packit.yaml (72 lines changed)
@@ -9,6 +9,27 @@
downstream_package_name: skopeo
upstream_tag_template: v{version}

# These files get synced from upstream to downstream (Fedora / CentOS Stream) on every
# propose-downstream job. This is done so tests maintained upstream can be run
# downstream in Zuul CI and Bodhi.
# Ref: https://packit.dev/docs/configuration#files_to_sync
files_to_sync:
- src: rpm/gating.yaml
dest: gating.yaml
delete: true
- src: plans/
dest: plans/
delete: true
mkpath: true
- src: systemtest/tmt/
dest: test/tmt/
delete: true
mkpath: true
- src: .fmf/
dest: .fmf/
delete: true
- .packit.yaml

packages:
skopeo-fedora:
pkg_tool: fedpkg
@@ -16,8 +37,6 @@ packages:
skopeo-centos:
pkg_tool: centpkg
specfile_path: rpm/skopeo.spec
skopeo-rhel:
specfile_path: rpm/skopeo.spec
skopeo-eln:
specfile_path: rpm/skopeo.spec

@@ -31,15 +50,12 @@ jobs:
notifications: &copr_build_failure_notification
failure_comment:
message: "Ephemeral COPR build failed. @containers/packit-build please check."
targets:
- fedora-development-x86_64
- fedora-development-aarch64
- fedora-latest-x86_64
- fedora-latest-aarch64
targets: &fedora_copr_targets
# This should generally be fedora-all-*, but we exclude Fedora 40 because it does not have Go 1.23.
- fedora-latest-stable-x86_64
- fedora-latest-stable-aarch64
- fedora-40-x86_64
- fedora-40-aarch64
- fedora-development-x86_64
- fedora-development-aarch64
enable_net: true

- job: copr_build
@@ -59,23 +75,13 @@ jobs:
trigger: pull_request
packages: [skopeo-centos]
notifications: *copr_build_failure_notification
targets:
targets: &centos_copr_targets
- centos-stream-9-x86_64
- centos-stream-9-aarch64
- centos-stream-10-x86_64
- centos-stream-10-aarch64
enable_net: true

# Disabled until there is go 1.22 in epel-9
# - job: copr_build
# trigger: pull_request
# packages: [skopeo-rhel]
# notifications: *copr_build_failure_notification
# targets:
# - epel-9-x86_64
# - epel-9-aarch64
# enable_net: true

# Run on commit to main branch
- job: copr_build
trigger: commit
@@ -88,6 +94,32 @@ jobs:
project: podman-next
enable_net: true

# Tests on Fedora for main branch
- job: tests
trigger: pull_request
packages: [skopeo-fedora]
notifications: &test_failure_notification
failure_comment:
message: "Tests failed. @containers/packit-build please check."
targets: *fedora_copr_targets
tf_extra_params:
environments:
- artifacts:
- type: repository-file
id: https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/repo/fedora-$releasever/rhcontainerbot-podman-next-fedora-$releasever.repo

# Tests on CentOS Stream for main branch
- job: tests
trigger: pull_request
packages: [skopeo-centos]
notifications: *test_failure_notification
targets: *centos_copr_targets
tf_extra_params:
environments:
- artifacts:
- type: repository-file
id: https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/repo/centos-stream-$releasever/rhcontainerbot-podman-next-centos-stream-$releasever.repo

# Sync to Fedora
- job: propose_downstream
trigger: release

@@ -1,3 +1,3 @@
## The skopeo Project Community Code of Conduct

The skopeo project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
The skopeo project, as part of Podman Container Tools, follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).

GOVERNANCE.md (new file, 3 lines)
@@ -0,0 +1,3 @@
## The Skopeo Project Community Governance

The Skopeo project, as part of Podman Container Tools, follows the [Podman Project Governance](https://github.com/containers/podman/blob/main/GOVERNANCE.md).

MAINTAINERS.md (new file, 29 lines)
@@ -0,0 +1,29 @@
# Skopeo Maintainers

[GOVERNANCE.md](https://github.com/containers/podman/blob/main/GOVERNANCE.md)
describes the project's governance and the Project Roles used below.

## Maintainers

| Maintainer | GitHub ID | Project Roles | Affiliation |
|---|---|---|---|
| Brent Baude | [baude](https://github.com/baude) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Nalin Dahyabhai | [nalind](https://github.com/nalind) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Matthew Heon | [mheon](https://github.com/mheon) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Paul Holzinger | [Luap99](https://github.com/Luap99) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Giuseppe Scrivano | [giuseppe](https://github.com/giuseppe) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Miloslav Trmač | [mtrmac](https://github.com/mtrmac) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Neil Smith | [Neil-Smith](https://github.com/Neil-Smith) | Community Manager | [Red Hat](https://github.com/RedHatOfficial) |
| Tom Sweeney | [TomSweeneyRedHat](https://github.com/TomSweeneyRedHat/) | Maintainer and Community Manager | [Red Hat](https://github.com/RedHatOfficial) |
| Lokesh Mandvekar | [lsm5](https://github.com/lsm5) | Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Dan Walsh | [rhatdan](https://github.com/rhatdan) | Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Ashley Cui | [ashley-cui](https://github.com/ashley-cui) | Reviewer | [Red Hat](https://github.com/RedHatOfficial) |
| Valentin Rothberg | [vrothberg](https://github.com/vrothberg) | Reviewer | [Red Hat](https://github.com/RedHatOfficial) |

## Alumni

None at present

## Credits

The structure of this document was based off of the equivalent one in the [CRI-O Project](https://github.com/cri-o/cri-o/blob/main/MAINTAINERS.md).

Makefile (10 lines changed)
@@ -27,7 +27,7 @@ GOARCH ?= $(shell go env GOARCH)
# N/B: This value is managed by Renovate, manual changes are
# possible, as long as they don't disturb the formatting
# (i.e. DO NOT ADD A 'v' prefix!)
GOLANGCI_LINT_VERSION := 1.61.0
GOLANGCI_LINT_VERSION := 2.1.2

ifeq ($(GOBIN),)
GOBIN := $(GOPATH)/bin
@@ -90,7 +90,7 @@ SKOPEO_LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} $(EXTRA_LDFLAGS)'
MANPAGES_MD = $(wildcard docs/*.md)
MANPAGES ?= $(MANPAGES_MD:%.md=%)

BTRFS_BUILD_TAG = $(shell hack/btrfs_tag.sh) $(shell hack/btrfs_installed_tag.sh)
BTRFS_BUILD_TAG = $(shell hack/btrfs_installed_tag.sh)
LIBSUBID_BUILD_TAG = $(shell hack/libsubid_tag.sh)
LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBSUBID_BUILD_TAG)
BUILDTAGS += $(LOCAL_BUILD_TAGS)
@@ -237,7 +237,11 @@ test-all-local: validate-local validate-docs test-unit-local
validate-local:
hack/validate-git-marks.sh
hack/validate-gofmt.sh
GOBIN=$(GOBIN) hack/validate-lint.sh
$(GOBIN)/golangci-lint run --build-tags "${BUILDTAGS}"
# An extra run with --tests=false allows detecting code unused outside of tests;
# ideally the linter should be able to find this automatically.
# Since everything is already cached, this additional run doesn't take much time.
$(GOBIN)/golangci-lint run --build-tags "${BUILDTAGS}" --tests=false
BUILDTAGS="${BUILDTAGS}" hack/validate-vet.sh

# This invokes bin/skopeo, hence cannot be run as part of validate-local

OWNERS (17 lines changed)
@@ -1,17 +1,22 @@
approvers:
- mtrmac
- baude
- giuseppe
- lsm5
- TomSweeneyRedHat
- Luap99
- mheon
- mtrmac
- nalind
- rhatdan
- vrothberg
- TomSweeneyRedHat
reviewers:
- ashley-cui
- baude
- giuseppe
- containers/image-maintainers
- lsm5
- Luap99
- mheon
- mtrmac
- QiWang19
- nalind
- rhatdan
- runcom
- TomSweeneyRedHat
- vrothberg

ROADMAP.md (new file, 12 lines)
@@ -0,0 +1,12 @@
# Skopeo Roadmap

Skopeo intends to mostly continue to be a very thin CLI wrapper over the [https://github.com/containers/image](containers/image) library, with most features being added there, not to this repo. A typical new Skopeo feature would only add a CLI for a recent containers/image feature.

## Future feature focus (most of the work must be done in the containers/image library)

* OCI artifact support.
* Integration of composefs.
* Partial pull support (zstd:chunked).
* Performance and stability improvements.
* Reductions to the size of the Skopeo binary.
* `skopeo sync` exists, and bugs in it should be fixed, but we don’t have much of an ambition to compete with much larger projects like [https://github.com/openshift/oc-mirror](oc-mirror).

@@ -1,15 +1,52 @@
package main

import (
"github.com/containers/image/v5/directory"
"github.com/containers/image/v5/docker"
dockerArchive "github.com/containers/image/v5/docker/archive"
ociArchive "github.com/containers/image/v5/oci/archive"
oci "github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/sif"
"github.com/containers/image/v5/tarball"
"github.com/containers/image/v5/transports"
"github.com/spf13/cobra"
"strings"
)

// autocompleteSupportedTransports list all supported transports with the colon suffix.
func autocompleteSupportedTransports(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
func autocompleteImageNames(cmd *cobra.Command, args []string, toComplete string) ([]cobra.Completion, cobra.ShellCompDirective) {
transport, details, haveTransport := strings.Cut(toComplete, ":")
if !haveTransport {
transports := supportedTransportSuggestions()
return transports, cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp
}
switch transport {
case ociArchive.Transport.Name(), dockerArchive.Transport.Name():
// Can have [:{*reference|@source-index}]
// FIXME: `oci-archive:/path/to/a.oci:<TAB>` completes paths
return nil, cobra.ShellCompDirectiveNoSpace
case sif.Transport.Name():
return nil, cobra.ShellCompDirectiveDefault

// Both directory and oci should have ShellCompDirectiveFilterDirs to complete only directories, but it doesn't currently work in bash: https://github.com/spf13/cobra/issues/2242
case oci.Transport.Name():
// Can have '[:{reference|@source-index}]'
// FIXME: `oci:/path/to/dir/:<TAB>` completes paths
return nil, cobra.ShellCompDirectiveDefault | cobra.ShellCompDirectiveNoSpace
case directory.Transport.Name():
return nil, cobra.ShellCompDirectiveDefault

case docker.Transport.Name():
if details == "" {
return []cobra.Completion{transport + "://"}, cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp
}
}
return nil, cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp
}

// supportedTransportSuggestions list all supported transports with the colon suffix.
func supportedTransportSuggestions() []string {
tps := transports.ListNames()
suggestions := make([]string, 0, len(tps))
suggestions := make([]cobra.Completion, 0, len(tps))
for _, tp := range tps {
// ListNames is generally expected to filter out deprecated transports.
// tarball: is not deprecated, but it is only usable from a Go caller (using tarball.ConfigUpdater),
@@ -18,5 +55,5 @@ func autocompleteSupportedTransports(cmd *cobra.Command, args []string, toComple
suggestions = append(suggestions, tp+":")
}
}
return suggestions, cobra.ShellCompDirectiveNoFileComp
return suggestions
}
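The completion hunk above replaces the transport-only helper with autocompleteImageNames, which first offers transport prefixes and then applies per-transport completion rules. A minimal, self-contained sketch of how such a ValidArgsFunction attaches to a cobra command — the command name and the hard-coded suggestion list here are hypothetical, not skopeo's actual wiring (it uses cobra.Completion, a string alias available in recent cobra releases):

```go
package main

import (
	"strings"

	"github.com/spf13/cobra"
)

// completeImageRef mirrors the shape of autocompleteImageNames: suggest transport
// prefixes until a colon is typed, then fall back to transport-specific behavior.
func completeImageRef(cmd *cobra.Command, args []string, toComplete string) ([]cobra.Completion, cobra.ShellCompDirective) {
	if !strings.Contains(toComplete, ":") {
		// No transport yet: offer prefixes and keep the cursor glued to the colon.
		return []cobra.Completion{"docker:", "oci:", "dir:"},
			cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp
	}
	// After the transport, defer to the shell's default (file) completion.
	return nil, cobra.ShellCompDirectiveDefault
}

func newExampleCmd() *cobra.Command {
	return &cobra.Command{
		Use:               "show IMAGE", // hypothetical subcommand
		ValidArgsFunction: completeImageRef,
		RunE:              func(cmd *cobra.Command, args []string) error { return nil },
	}
}

func main() {
	root := &cobra.Command{Use: "example"}
	root.AddCommand(newExampleCmd())
	_ = root.Execute()
}
```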

@@ -71,7 +71,7 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
RunE: commandAction(opts.run),
Example: `skopeo copy docker://quay.io/skopeo/stable:latest docker://registry.example.com/skopeo:latest`,
ValidArgsFunction: autocompleteSupportedTransports,
ValidArgsFunction: autocompleteImageNames,
}
adjustUsage(cmd)
flags := cmd.Flags()

cmd/skopeo/copy_test.go (new file, 18 lines)
@@ -0,0 +1,18 @@
package main

import "testing"

func TestCopy(t *testing.T) {
// Invalid command-line arguments
for _, args := range [][]string{
{},
{"a1"},
{"a1", "a2", "a3"},
} {
out, err := runSkopeo(append([]string{"--insecure-policy", "copy"}, args...)...)
assertTestFailed(t, out, err, "Exactly two arguments expected")
}

// FIXME: Much more test coverage
// Actual feature tests exist in integration and systemtest
}

@@ -37,7 +37,7 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
RunE: commandAction(opts.run),
Example: `skopeo delete docker://registry.example.com/example/pause:latest`,
ValidArgsFunction: autocompleteSupportedTransports,
ValidArgsFunction: autocompleteImageNames,
}
adjustUsage(cmd)
flags := cmd.Flags()

@@ -53,7 +53,7 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
Example: `skopeo inspect docker://registry.fedoraproject.org/fedora
skopeo inspect --config docker://docker.io/alpine
skopeo inspect --format "Name: {{.Name}} Digest: {{.Digest}}" docker://registry.access.redhat.com/ubi8`,
ValidArgsFunction: autocompleteSupportedTransports,
ValidArgsFunction: autocompleteImageNames,
}
adjustUsage(cmd)
flags := cmd.Flags()
@@ -106,8 +106,9 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
}
}()

unparsedInstance := image.UnparsedInstance(src, nil)
if err := retry.IfNecessary(ctx, func() error {
rawManifest, _, err = src.GetManifest(ctx, nil)
rawManifest, _, err = unparsedInstance.Manifest(ctx)
return err
}, opts.retryOpts); err != nil {
return fmt.Errorf("Error retrieving manifest for image: %w", err)
@@ -122,7 +123,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
return nil
}

img, err := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(src, nil))
img, err := image.FromUnparsedImage(ctx, sys, unparsedInstance)
if err != nil {
return fmt.Errorf("Error parsing manifest for image: %w", err)
}
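The inspect hunks above build a single image.UnparsedInstance and reuse it both for fetching the raw manifest and for parsing the image. A rough sketch of that flow against the containers/image v5 API, assuming a hypothetical caller that only wants to print the manifest size and creation time (retry and policy handling omitted):

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/image"
	"github.com/containers/image/v5/transports/alltransports"
	"github.com/containers/image/v5/types"
)

// showManifest resolves an image name, reuses one UnparsedInstance for the raw
// manifest and the parsed image, and prints a couple of fields from Inspect.
func showManifest(ctx context.Context, sys *types.SystemContext, name string) error {
	ref, err := alltransports.ParseImageName(name)
	if err != nil {
		return err
	}
	src, err := ref.NewImageSource(ctx, sys)
	if err != nil {
		return err
	}
	defer src.Close()

	unparsed := image.UnparsedInstance(src, nil) // nil = default instance of a manifest list
	rawManifest, mimeType, err := unparsed.Manifest(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("%d manifest bytes (%s)\n", len(rawManifest), mimeType)

	img, err := image.FromUnparsedImage(ctx, sys, unparsed)
	if err != nil {
		return err
	}
	info, err := img.Inspect(ctx)
	if err != nil {
		return err
	}
	fmt.Println("created:", info.Created)
	return nil
}

func main() {
	if err := showManifest(context.Background(), nil, "docker://registry.fedoraproject.org/fedora:latest"); err != nil {
		fmt.Println("error:", err)
	}
}
```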

@@ -151,12 +151,22 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
}, opts.retryOpts); err != nil {
return err
}
if _, err := dest.PutBlob(ctx, r, types.BlobInfo{Digest: bd.digest, Size: blobSize}, cache, bd.isConfig); err != nil {
if closeErr := r.Close(); closeErr != nil {
return fmt.Errorf("%w (close error: %v)", err, closeErr)
defer func() {
if err := r.Close(); err != nil {
retErr = noteCloseFailure(retErr, fmt.Sprintf("closing blob %q", bd.digest.String()), err)
}
}()
verifier := bd.digest.Verifier()
tr := io.TeeReader(r, verifier)
if _, err := dest.PutBlob(ctx, tr, types.BlobInfo{Digest: bd.digest, Size: blobSize}, cache, bd.isConfig); err != nil {
return err
}
if _, err := io.Copy(io.Discard, tr); err != nil { // Ensure we process all of tr, so that we can validate the digest.
return err
}
if !verifier.Verified() {
return fmt.Errorf("corrupt blob %q", bd.digest.String())
}
}

var manifest []byte
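The layers hunk above tees each blob through a digest verifier while it is being consumed, drains the remainder, and then checks Verified(). The same pattern in isolation, using only io and the opencontainers go-digest module already in go.mod; the io.Discard destination is a stand-in for a real blob sink, not skopeo's PutBlob:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/opencontainers/go-digest"
)

// verifyBlob copies r to w while hashing the bytes, and fails if the stream
// does not match the expected digest — the TeeReader+Verifier pattern above.
func verifyBlob(w io.Writer, r io.Reader, expected digest.Digest) error {
	verifier := expected.Verifier()
	tr := io.TeeReader(r, verifier)
	if _, err := io.Copy(w, tr); err != nil {
		return err
	}
	if !verifier.Verified() {
		return fmt.Errorf("corrupt blob %q", expected.String())
	}
	return nil
}

func main() {
	data := "example layer contents"
	d := digest.FromString(data) // known-good digest for the payload
	if err := verifyBlob(io.Discard, strings.NewReader(data), d); err != nil {
		panic(err)
	}
	fmt.Println("digest verified:", d)
}
```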

@@ -6,7 +6,8 @@ import (
"errors"
"fmt"
"io"
"sort"
"maps"
"slices"
"strings"

"github.com/containers/common/pkg/retry"
@@ -16,7 +17,6 @@ import (
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/spf13/cobra"
"golang.org/x/exp/maps"
)

// tagListOutput is the output format of (skopeo list-tags), primarily so that we can format it with a simple json.MarshalIndent.
@@ -38,8 +38,7 @@ var transportHandlers = map[string]func(ctx context.Context, sys *types.SystemCo

// supportedTransports returns all the supported transports
func supportedTransports(joinStr string) string {
res := maps.Keys(transportHandlers)
sort.Strings(res)
res := slices.Sorted(maps.Keys(transportHandlers))
return strings.Join(res, joinStr)
}
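The import and supportedTransports hunks above swap golang.org/x/exp/maps plus sort.Strings for the Go 1.23 standard library: maps.Keys now yields an iterator, and slices.Sorted collects and sorts it in one step. A tiny standalone illustration (the map contents here are made up):

```go
package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	handlers := map[string]bool{"docker": true, "docker-archive": true, "oci": true}
	// maps.Keys returns an iter.Seq[string]; slices.Sorted collects and sorts it,
	// replacing the previous x/exp maps.Keys + sort.Strings pair.
	names := slices.Sorted(maps.Keys(handlers))
	fmt.Println(names) // [docker docker-archive oci]
}
```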

@@ -54,3 +54,17 @@ func TestDockerRepositoryReferenceParserDrift(t *testing.T) {
}
}

func TestListTags(t *testing.T) {
// Invalid command-line arguments
for _, args := range [][]string{
{},
{"a1", "a2"},
} {
out, err := runSkopeo(append([]string{"list-tags"}, args...)...)
assertTestFailed(t, out, err, "Exactly one non-option argument expected")
}

// FIXME: Much more test coverage
// Actual feature tests exist in systemtest
}

@@ -1,5 +1,4 @@
//go:build !windows
// +build !windows

package main

@@ -93,7 +92,8 @@ import (
// 0.2.4: Added OpenImageOptional
// 0.2.5: Added LayerInfoJSON
// 0.2.6: Policy Verification before pulling OCI
const protocolVersion = "0.2.6"
// 0.2.7: Added GetLayerInfoPiped
const protocolVersion = "0.2.7"

// maxMsgSize is the current limit on a packet size.
// Note that all non-metadata (i.e. payload data) is sent over a pipe.
@@ -277,7 +277,7 @@ func (h *proxyHandler) openImageImpl(args []any, allowNotFound bool) (retReplyBu
return ret, nil
}

// OpenImage accepts a string image reference i.e. TRANSPORT:REF - like `skopeo copy`.
// OpenImageOptional accepts a string image reference i.e. TRANSPORT:REF - like `skopeo copy`.
// The return value is an opaque integer handle. If the image does not exist, zero
// is returned.
func (h *proxyHandler) OpenImageOptional(args []any) (replyBuf, error) {
@@ -619,9 +619,10 @@ func (h *proxyHandler) GetBlob(args []any) (replyBuf, error) {

// GetLayerInfo returns data about the layers of an image, useful for reading the layer contents.
//
// This needs to be called since the data returned by GetManifest() does not allow to correctly
// calling GetBlob() for the containers-storage: transport (which doesn’t store the original compressed
// representations referenced in the manifest).
// This is the same as GetLayerInfoPiped, but returns its contents inline. This is subject to
// failure for large images (because we use SOCK_SEQPACKET which has a maximum buffer size)
// and is hence only retained for backwards compatibility. Callers are expected to use
// the semver to know whether they can call the new API.
func (h *proxyHandler) GetLayerInfo(args []any) (replyBuf, error) {
h.lock.Lock()
defer h.lock.Unlock()
@@ -667,6 +668,59 @@ func (h *proxyHandler) GetLayerInfo(args []any) (replyBuf, error) {
return ret, nil
}

// GetLayerInfoPiped returns data about the layers of an image, useful for reading the layer contents.
//
// This needs to be called since the data returned by GetManifest() does not allow to correctly
// calling GetBlob() for the containers-storage: transport (which doesn’t store the original compressed
// representations referenced in the manifest).
func (h *proxyHandler) GetLayerInfoPiped(args []any) (replyBuf, error) {
h.lock.Lock()
defer h.lock.Unlock()

var ret replyBuf

if h.sysctx == nil {
return ret, fmt.Errorf("client error: must invoke Initialize")
}

if len(args) != 1 {
return ret, fmt.Errorf("found %d args, expecting (imgid)", len(args))
}

imgref, err := h.parseImageFromID(args[0])
if err != nil {
return ret, err
}

ctx := context.TODO()

err = h.cacheTargetManifest(imgref)
if err != nil {
return ret, err
}
img := imgref.cachedimg

layerInfos, err := img.LayerInfosForCopy(ctx)
if err != nil {
return ret, err
}

if layerInfos == nil {
layerInfos = img.LayerInfos()
}

layers := make([]convertedLayerInfo, 0, len(layerInfos))
for _, layer := range layerInfos {
layers = append(layers, convertedLayerInfo{layer.Digest, layer.Size, layer.MediaType})
}

serialized, err := json.Marshal(&layers)
if err != nil {
return ret, err
}
return h.returnBytes(nil, serialized)
}

// FinishPipe waits for the worker goroutine to finish, and closes the write side of the pipe.
func (h *proxyHandler) FinishPipe(args []any) (replyBuf, error) {
h.lock.Lock()
@@ -806,6 +860,8 @@ func (h *proxyHandler) processRequest(readBytes []byte) (rb replyBuf, terminate
rb, err = h.GetBlob(req.Args)
case "GetLayerInfo":
rb, err = h.GetLayerInfo(req.Args)
case "GetLayerInfoPiped":
rb, err = h.GetLayerInfoPiped(req.Args)
case "FinishPipe":
rb, err = h.FinishPipe(req.Args)
case "Shutdown":
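The proxy hunks above bump protocolVersion to 0.2.7 and note that callers should consult the reported semver before using the new GetLayerInfoPiped call. One way a client could perform that check with the Masterminds/semver module already listed in go.mod — a hypothetical helper, not part of the proxy protocol itself:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

// canUseGetLayerInfoPiped reports whether a proxy advertising the given protocol
// version supports GetLayerInfoPiped (introduced in 0.2.7, per the comment above).
func canUseGetLayerInfoPiped(reported string) (bool, error) {
	v, err := semver.NewVersion(reported)
	if err != nil {
		return false, err
	}
	c, err := semver.NewConstraint(">= 0.2.7")
	if err != nil {
		return false, err
	}
	return c.Check(v), nil
}

func main() {
	ok, err := canUseGetLayerInfoPiped("0.2.7")
	fmt.Println(ok, err) // true <nil>
}
```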

@@ -1,5 +1,4 @@
//go:build windows
// +build windows

package main

@@ -44,3 +44,18 @@ func TestTLSVerifyConfig(t *testing.T) {
err := yaml.Unmarshal([]byte(`tls-verify: "not a valid bool"`), &config)
assert.Error(t, err)
}

func TestSync(t *testing.T) {
// Invalid command-line arguments
for _, args := range [][]string{
{},
{"a1"},
{"a1", "a2", "a3"},
} {
out, err := runSkopeo(append([]string{"sync"}, args...)...)
assertTestFailed(t, out, err, "Exactly two arguments expected")
}

// FIXME: Much more test coverage
// Actual feature tests exist in integration and systemtest
}

@@ -1,5 +1,4 @@
//go:build !linux
// +build !linux

package main

@@ -7,6 +7,7 @@ import (
"io"
"os"
"strings"
"time"

commonFlag "github.com/containers/common/pkg/flag"
"github.com/containers/common/pkg/retry"
@@ -26,7 +27,7 @@ import (
"golang.org/x/term"
)

// errorShouldDisplayUsage is a subtype of error used by command handlers to indicate that cli.ShowSubcommandHelp should be called.
// errorShouldDisplayUsage is a subtype of error used by command handlers to indicate that the command’s help should be included.
type errorShouldDisplayUsage struct {
error
}
@@ -62,7 +63,8 @@ func commandAction(handler func(args []string, stdout io.Writer) error) func(cmd
err := handler(args, c.OutOrStdout())
var shouldDisplayUsage errorShouldDisplayUsage
if errors.As(err, &shouldDisplayUsage) {
return c.Help()
c.SetOut(c.ErrOrStderr()) // This mutates c, but we are failing anyway.
_ = c.Help() // Even if this failed, we prefer to report the original error
}
return err
}
@@ -183,6 +185,7 @@ func retryFlags() (pflag.FlagSet, *retry.Options) {
opts := retry.Options{}
fs := pflag.FlagSet{}
fs.IntVar(&opts.MaxRetry, "retry-times", 0, "the number of times to possibly retry")
fs.DurationVar(&opts.Delay, "retry-delay", 0*time.Second, "Fixed delay between retries. If not set, retry uses an exponential backoff delay.")
return fs, &opts
}

@@ -197,8 +200,8 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
ctx.AuthFilePath = opts.shared.authFilePath
ctx.DockerDaemonHost = opts.dockerDaemonHost
ctx.DockerDaemonCertPath = opts.dockerCertPath
if opts.dockerImageOptions.authFilePath.Present() {
ctx.AuthFilePath = opts.dockerImageOptions.authFilePath.Value()
if opts.authFilePath.Present() {
ctx.AuthFilePath = opts.authFilePath.Value()
}
if opts.deprecatedTLSVerify != nil && opts.deprecatedTLSVerify.tlsVerify.Present() {
// If both this deprecated option and a non-deprecated option is present, we use the latter value.
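The commandAction hunk above changes how a usage error is reported: help now goes to stderr and the original error is still returned. A self-contained sketch of that contract — the error type is re-declared here so the snippet compiles on its own, and exampleRun is a hypothetical handler, not existing skopeo code:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"os"
)

// errorShouldDisplayUsage mirrors the struct shown above: an error subtype that
// tells the command wrapper to also print the command's help (to stderr).
type errorShouldDisplayUsage struct {
	error
}

// exampleRun is a hypothetical handler: returning errorShouldDisplayUsage on bad
// arguments triggers the help output while the original error is still reported.
func exampleRun(args []string, stdout io.Writer) error {
	if len(args) != 2 {
		return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
	}
	fmt.Fprintln(stdout, "copying", args[0], "to", args[1])
	return nil
}

func main() {
	err := exampleRun([]string{"only-one"}, os.Stdout)
	var usageErr errorShouldDisplayUsage
	// The caller would show help when this is true, then still return err.
	fmt.Println(errors.As(err, &usageErr), err)
}
```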

@@ -128,7 +128,7 @@ _run_system() {
make test-system-local BUILDTAGS="$BUILDTAGS"
}

req_env_vars SKOPEO_PATH BUILDTAGS
req_env_vars SKOPEO_PATH

handler="_run_${1}"
if [ "$(type -t $handler)" != "function" ]; then

@@ -204,7 +204,11 @@ Precompute digests to ensure layers are not uploaded that already exist on the d

**--retry-times**

The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.
The number of times to retry.

**--retry-delay**

Fixed delay between retries. If not set (or set to 0s), retry wait time will be exponentially increased based on the number of failed attempts.

**--src-username**

@@ -66,7 +66,11 @@ Bearer token for accessing the registry.

**--retry-times**

The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.
The number of times to retry.

**--retry-delay**

Fixed delay between retries. If not set (or set to 0s), retry wait time will be exponentially increased based on the number of failed attempts.

**--shared-blob-dir** _directory_

@@ -65,7 +65,11 @@ Registry token for accessing the registry.

**--retry-times**

The number of times to retry; retry wait time will be exponentially increased based on the number of failed attempts.
The number of times to retry.

**--retry-delay**

Fixed delay between retries. If not set (or set to 0s), retry wait time will be exponentially increased based on the number of failed attempts.

**--shared-blob-dir** _directory_

@@ -39,7 +39,11 @@ Bearer token for accessing the registry.

**--retry-times**

The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.
The number of times to retry.

**--retry-delay**

Fixed delay between retries. If not set (or set to 0s), retry wait time will be exponentially increased based on the number of failed attempts.

**--tls-verify**=_bool_

@@ -123,7 +123,13 @@ The passphare to use when signing with `--sign-by` or `--sign-by-sigstore-privat

**--dest-registry-token** _Bearer token_ for accessing the destination registry.

**--retry-times** the number of times to retry, retry wait time will be exponentially increased based on the number of failed attempts.
**--retry-times**

The number of times to retry.

**--retry-delay**

Fixed delay between retries. If not set (or set to 0s), retry wait time will be exponentially increased based on the number of failed attempts.

**--keep-going**
If any errors occur during copying of images, those errors are logged and the process continues syncing rest of the images and finally fails at the end.
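The documentation hunks above split the retry behavior across two options: **--retry-delay** pins a fixed wait between attempts, while leaving it unset (or 0s) keeps the exponential backoff tied to the number of failed attempts. A sketch of how the corresponding retry.Options from containers/common might be wired around an operation — pullManifest is a made-up placeholder, and whether a failure is actually retried also depends on the library classifying the error as retryable:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/containers/common/pkg/retry"
)

// pullManifest is a hypothetical operation standing in for a registry call.
func pullManifest(ctx context.Context) error { return nil }

func main() {
	opts := &retry.Options{
		MaxRetry: 3,               // --retry-times 3
		Delay:    2 * time.Second, // --retry-delay 2s; leave 0 for exponential backoff
	}
	err := retry.IfNecessary(context.Background(), func() error {
		// Retried up to MaxRetry times when the returned error is considered
		// retryable (e.g. temporary network failures); others return immediately.
		return pullManifest(context.Background())
	}, opts)
	fmt.Println("result:", err)
}
```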

go.mod (109 lines changed)
@@ -1,57 +1,56 @@
module github.com/containers/skopeo

// Minimum required golang version
go 1.22.6
go 1.23.0

// Warning: Ensure the "go" and "toolchain" versions match exactly to prevent unwanted auto-updates

require (
github.com/Masterminds/semver/v3 v3.3.0
github.com/containers/common v0.61.0
github.com/containers/image/v5 v5.33.0
github.com/containers/ocicrypt v1.2.0
github.com/containers/storage v1.56.0
github.com/Masterminds/semver/v3 v3.3.1
github.com/containers/common v0.62.3
github.com/containers/image/v5 v5.34.3
github.com/containers/ocicrypt v1.2.1
github.com/containers/storage v1.58.0
github.com/docker/distribution v2.8.3+incompatible
github.com/moby/sys/capability v0.3.0
github.com/moby/sys/capability v0.4.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.0
github.com/opencontainers/image-spec v1.1.1
github.com/opencontainers/image-tools v1.0.0-rc3
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.9.0
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c
golang.org/x/term v0.26.0
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.6
github.com/stretchr/testify v1.10.0
golang.org/x/term v0.31.0
gopkg.in/yaml.v3 v3.0.1
)

require (
dario.cat/mergo v1.0.1 // indirect
github.com/BurntSushi/toml v1.4.0 // indirect
github.com/BurntSushi/toml v1.5.0 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Microsoft/hcsshim v0.12.9 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/containerd/cgroups/v3 v3.0.3 // indirect
github.com/containerd/cgroups/v3 v3.0.5 // indirect
github.com/containerd/errdefs v0.3.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
github.com/containerd/typeurl/v2 v2.2.0 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
github.com/containerd/typeurl/v2 v2.2.3 // indirect
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
github.com/coreos/go-oidc/v3 v3.11.0 // indirect
github.com/coreos/go-oidc/v3 v3.12.0 // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
github.com/cyphar/filepath-securejoin v0.3.4 // indirect
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/docker v27.3.1+incompatible // indirect
github.com/docker/docker v27.5.1+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-jose/go-jose/v3 v3.0.3 // indirect
github.com/go-jose/go-jose/v4 v4.0.4 // indirect
github.com/go-jose/go-jose/v3 v3.0.4 // indirect
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.23.0 // indirect
@@ -65,20 +64,19 @@ require (
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-openapi/validate v0.24.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/go-containerregistry v0.20.2 // indirect
github.com/google/go-intervals v0.0.2 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-multierror v1.1.2-0.20250313123807-1ee6e1a1957a // indirect
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect
github.com/mailru/easyjson v0.7.7 // indirect
@@ -89,51 +87,50 @@ require (
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/mountinfo v0.7.2 // indirect
github.com/moby/sys/user v0.3.0 // indirect
github.com/moby/sys/user v0.4.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/opencontainers/runtime-spec v1.2.0 // indirect
github.com/opencontainers/selinux v1.11.1 // indirect
github.com/opencontainers/runtime-spec v1.2.1 // indirect
github.com/opencontainers/selinux v1.12.0 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/proglottis/gpgme v0.1.3 // indirect
github.com/proglottis/gpgme v0.1.4 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/russross/blackfriday v2.0.0+incompatible // indirect
github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
github.com/sigstore/fulcio v1.6.4 // indirect
github.com/sigstore/rekor v1.3.6 // indirect
github.com/sigstore/sigstore v1.8.9 // indirect
github.com/sigstore/rekor v1.3.8 // indirect
github.com/sigstore/sigstore v1.8.12 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/smallstep/pkcs7 v0.1.1 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
github.com/sylabs/sif/v2 v2.19.1 // indirect
github.com/tchap/go-patricia/v2 v2.3.1 // indirect
github.com/sylabs/sif/v2 v2.20.2 // indirect
github.com/tchap/go-patricia/v2 v2.3.2 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/vbatts/tar-split v0.11.6 // indirect
github.com/vbauerster/mpb/v8 v8.8.3 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/vbatts/tar-split v0.12.1 // indirect
github.com/vbauerster/mpb/v8 v8.9.1 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
go.opentelemetry.io/otel v1.28.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/otel/trace v1.28.0 // indirect
golang.org/x/crypto v0.29.0 // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.9.0 // indirect
golang.org/x/sys v0.27.0 // indirect
golang.org/x/text v0.20.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
google.golang.org/grpc v1.67.0 // indirect
google.golang.org/protobuf v1.35.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
go.opentelemetry.io/otel v1.31.0 // indirect
go.opentelemetry.io/otel/metric v1.31.0 // indirect
go.opentelemetry.io/otel/trace v1.31.0 // indirect
golang.org/x/crypto v0.36.0 // indirect
golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/oauth2 v0.25.0 // indirect
golang.org/x/sync v0.13.0 // indirect
golang.org/x/sys v0.32.0 // indirect
golang.org/x/text v0.23.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect
google.golang.org/grpc v1.69.4 // indirect
google.golang.org/protobuf v1.36.2 // indirect
)

go.sum (290 lines changed)
@@ -6,10 +6,10 @@ github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dY
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4=
github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg=
@@ -29,47 +29,47 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo=
github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
github.com/containers/common v0.61.0 h1:j/84PTqZIKKYy42OEJsZmjZ4g4Kq2ERuC3tqp2yWdh4=
github.com/containers/common v0.61.0/go.mod h1:NGRISq2vTFPSbhNqj6MLwyes4tWSlCnqbJg7R77B8xc=
github.com/containers/image/v5 v5.33.0 h1:6oPEFwTurf7pDTGw7TghqGs8K0+OvPtY/UyzU0B2DfE=
github.com/containers/image/v5 v5.33.0/go.mod h1:T7HpASmvnp2H1u4cyckMvCzLuYgpD18dSmabSw0AcHk=
github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8=
github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU=
github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40=
github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
github.com/containers/common v0.62.3 h1:aOGryqXfW6aKBbHbqOveH7zB+ihavUN03X/2pUSvWFI=
github.com/containers/common v0.62.3/go.mod h1:3R8kDox2prC9uj/a2hmXj/YjZz5sBEUNrcDiw51S0Lo=
github.com/containers/image/v5 v5.34.3 h1:/cMgfyA4Y7ILH7nzWP/kqpkE5Df35Ek4bp5ZPvJOVmI=
github.com/containers/image/v5 v5.34.3/go.mod h1:MG++slvQSZVq5ejAcLdu4APGsKGMb0YHHnAo7X28fdE=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sirVuPM=
github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U=
github.com/containers/storage v1.56.0 h1:DZ9KSkj6M2tvj/4bBoaJu3QDHRl35BwsZ4kmLJS97ZI=
github.com/containers/storage v1.56.0/go.mod h1:c6WKowcAlED/DkWGNuL9bvGYqIWCVy7isRMdCSKWNjk=
github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI=
github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM=
github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ=
github.com/containers/storage v1.58.0 h1:Q7SyyCCjqgT3wYNgRNIL8o/wUS92heIj2/cc8Sewvcc=
github.com/containers/storage v1.58.0/go.mod h1:w7Jl6oG+OpeLGLzlLyOZPkmUso40kjpzgrHUk5tyBlo=
github.com/coreos/go-oidc/v3 v3.12.0 h1:sJk+8G2qq94rDI6ehZ71Bol3oUHy63qNYmkiSjrc/Jo=
github.com/coreos/go-oidc/v3 v3.12.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM=
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
github.com/cyphar/filepath-securejoin v0.3.4 h1:VBWugsJh2ZxJmLFSM06/0qzQyiQX2Qs0ViKrUAcqdZ8=
github.com/cyphar/filepath-securejoin v0.3.4/go.mod h1:8s/MCNJREmFK0H02MF6Ihv1nakJe4L/w3WZLHNkvlYM=
github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ=
github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v27.5.1+incompatible h1:JB9cieUT9YNiMITtIsguaN55PLOHhBSz3LKVc6cqWaY=
github.com/docker/cli v27.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8=
github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
@@ -78,8 +78,8 @@ github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQ
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY=
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s=
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 h1:2tV76y6Q9BB+NEBasnqvs7e49aEBFI8ejC89PSnWH+4=
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s=
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -89,10 +89,10 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E=
github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc=
github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY=
github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -128,8 +128,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -150,15 +150,16 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo=
github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
@ -167,15 +168,12 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
|
||||
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
|
||||
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
|
||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/go-multierror v1.1.2-0.20250313123807-1ee6e1a1957a h1:zTI4FFCOXw14aUC78fxMh4tS7jJI7Fm51sH4smjl+Fc=
|
||||
github.com/hashicorp/go-multierror v1.1.2-0.20250313123807-1ee6e1a1957a/go.mod h1:RYOtqYU2MvOrqUMooJlQoFFuqR6sazGdm1ubZTL++r8=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
@ -189,8 +187,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
|
||||
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
@ -218,12 +216,12 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/sys/capability v0.3.0 h1:kEP+y6te0gEXIaeQhIi0s7vKs/w0RPoH1qPa6jROcVg=
|
||||
github.com/moby/sys/capability v0.3.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I=
|
||||
github.com/moby/sys/capability v0.4.0 h1:4D4mI6KlNtWMCM1Z/K0i7RV1FkX+DBDHKVJpCndZoHk=
|
||||
github.com/moby/sys/capability v0.4.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I=
|
||||
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
|
||||
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
|
||||
github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
|
||||
github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
|
||||
github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
|
||||
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
@ -237,20 +235,20 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
|
||||
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
|
||||
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
|
||||
github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
|
||||
github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
|
||||
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
|
||||
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
||||
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
||||
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
|
||||
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
|
||||
github.com/opencontainers/image-tools v1.0.0-rc3 h1:ZR837lBIxq6mmwEqfYrbLMuf75eBSHhccVHy6lsBeM4=
|
||||
github.com/opencontainers/image-tools v1.0.0-rc3/go.mod h1:A9btVpZLzttF4iFaKNychhPyrhfOjJ1OF5KrA8GcLj4=
|
||||
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
|
||||
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8=
|
||||
github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
|
||||
github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww=
|
||||
github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/selinux v1.12.0 h1:6n5JV4Cf+4y0KNXW48TLj5DwfXpvWlxXplUkdTrmPb8=
|
||||
github.com/opencontainers/selinux v1.12.0/go.mod h1:BTPX+bjVbWGXw7ZZWUbdENt8w0htPSrlgOOysQaU62U=
|
||||
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
|
||||
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
|
||||
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
|
||||
@ -260,10 +258,10 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0=
|
||||
github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
|
||||
github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
|
||||
github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/proglottis/gpgme v0.1.4 h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M=
|
||||
github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM=
|
||||
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
|
||||
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
@ -279,10 +277,12 @@ github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99
|
||||
github.com/russross/blackfriday v2.0.0+incompatible h1:cBXrhZNUf9C+La9/YpS+UHpUT8YD6Td9ZMSU9APFcsk=
|
||||
github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4=
|
||||
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
|
||||
github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEWY=
|
||||
github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw=
|
||||
github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
|
||||
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
|
||||
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
|
||||
@ -291,18 +291,20 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5I
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sigstore/fulcio v1.6.4 h1:d86obfxUAG3Y6CYwOx1pdwCZwKmROB6w6927pKOVIRY=
|
||||
github.com/sigstore/fulcio v1.6.4/go.mod h1:Y6bn3i3KGhXpaHsAtYP3Z4Np0+VzCo1fLv8Ci6mbPDs=
|
||||
github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8=
|
||||
github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc=
|
||||
github.com/sigstore/sigstore v1.8.9 h1:NiUZIVWywgYuVTxXmRoTT4O4QAGiTEKup4N1wdxFadk=
|
||||
github.com/sigstore/sigstore v1.8.9/go.mod h1:d9ZAbNDs8JJfxJrYmulaTazU3Pwr8uLL9+mii4BNR3w=
|
||||
github.com/sigstore/rekor v1.3.8 h1:B8kJI8mpSIXova4Jxa6vXdJyysRxFGsEsLKBDl0rRjA=
|
||||
github.com/sigstore/rekor v1.3.8/go.mod h1:/dHFYKSuxEygfDRnEwyJ+ZD6qoVYNXQdi1mJrKvKWsI=
|
||||
github.com/sigstore/sigstore v1.8.12 h1:S8xMVZbE2z9ZBuQUEG737pxdLjnbOIcFi5v9UFfkJFc=
|
||||
github.com/sigstore/sigstore v1.8.12/go.mod h1:+PYQAa8rfw0QdPpBcT+Gl3egKD9c+TUgAlF12H3Nmjo=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/smallstep/pkcs7 v0.1.1 h1:x+rPdt2W088V9Vkjho4KtoggyktZJlMduZAtRHm68LU=
|
||||
github.com/smallstep/pkcs7 v0.1.1/go.mod h1:dL6j5AIz9GHjVEBTXtW+QliALcgM19RtXaTeyxI+AfA=
|
||||
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
|
||||
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw=
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
@ -313,22 +315,21 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/sylabs/sif/v2 v2.19.1 h1:1eeMmFc8elqJe60ZiWwXgL3gMheb0IP4GmNZ4q0IEA0=
|
||||
github.com/sylabs/sif/v2 v2.19.1/go.mod h1:U1SUhvl8X1JIxAylC0DYz1fa/Xba6EMZD1dGPGBH83E=
|
||||
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
|
||||
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/sylabs/sif/v2 v2.20.2 h1:HGEPzauCHhIosw5o6xmT3jczuKEuaFzSfdjAsH33vYw=
|
||||
github.com/sylabs/sif/v2 v2.20.2/go.mod h1:WyYryGRaR4Wp21SAymm5pK0p45qzZCSRiZMFvUZiuhc=
|
||||
github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM=
|
||||
github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
|
||||
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
|
||||
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
|
||||
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
||||
github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
|
||||
github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
||||
github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs=
|
||||
github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI=
|
||||
github.com/vbauerster/mpb/v8 v8.8.3 h1:dTOByGoqwaTJYPubhVz3lO5O6MK553XVgUo33LdnNsQ=
|
||||
github.com/vbauerster/mpb/v8 v8.8.3/go.mod h1:JfCCrtcMsJwP6ZwMn9e5LMnNyp3TVNpUWWkN+nd4EWk=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo=
|
||||
github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
|
||||
github.com/vbauerster/mpb/v8 v8.9.1 h1:LH5R3lXPfE2e3lIGxN7WNWv3Hl5nWO6LRi2B0L0ERHw=
|
||||
github.com/vbauerster/mpb/v8 v8.9.1/go.mod h1:4XMvznPh8nfe2NpnDo1QTPvW9MVkUhbG90mPWvmOzcQ=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
|
||||
@ -350,24 +351,24 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
|
||||
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
|
||||
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak=
|
||||
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
|
||||
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
|
||||
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
|
||||
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
|
||||
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
|
||||
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
|
||||
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
|
||||
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
|
||||
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
|
||||
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
|
||||
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
|
||||
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
|
||||
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
|
||||
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
|
||||
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
|
||||
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
|
||||
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
|
||||
go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
|
||||
go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
@ -376,12 +377,15 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ=
|
||||
golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
||||
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY=
|
||||
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8=
|
||||
golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 h1:9kj3STMvgqy3YA4VQXBrN7925ICMxD5wzMRcgA30588=
|
||||
golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
@ -389,8 +393,11 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
|
||||
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
|
||||
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@ -404,11 +411,14 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
|
||||
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
|
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
|
||||
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
|
||||
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
|
||||
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@ -416,8 +426,12 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
|
||||
golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
|
||||
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@ -429,26 +443,36 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
|
||||
golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
|
||||
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU=
|
||||
golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
|
||||
golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
|
||||
golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
|
||||
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
|
||||
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
|
||||
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
@ -459,8 +483,10 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
|
||||
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
|
||||
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@ -470,18 +496,18 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
|
||||
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw=
|
||||
google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
|
||||
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
|
||||
google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@ -491,17 +517,15 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
|
||||
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU=
|
||||
google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
|
||||
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
||||
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
|
||||
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
@@ -1,7 +0,0 @@
#!/usr/bin/env bash
${CPP:-${CC:-cc} -E} ${CPPFLAGS} - > /dev/null 2> /dev/null << EOF
#include <btrfs/version.h>
EOF
if test $? -ne 0 ; then
	echo btrfs_noversion
fi
@@ -1,16 +0,0 @@
#!/bin/bash

errors=$($GOBIN/golangci-lint run --build-tags "${BUILDTAGS}" 2>&1)

if [ -z "$errors" ]; then
	echo 'Congratulations! All Go source files have been linted.'
else
	{
		echo "Errors from golangci-lint:"
		echo "$errors"
		echo
		echo 'Please fix the above errors. You can test via "golangci-lint" and commit the result.'
		echo
	} >&2
	exit 1
fi
@@ -139,7 +139,7 @@ located at [https://github.com/containers/image_build/tree/main/skopeo](https://

Otherwise, read on for building and installing it from source:

To build the `skopeo` binary you need at least Go 1.22.
To build the `skopeo` binary you need at least Go 1.23.

There are two ways to build skopeo: in a container, or locally without a
container. Choose the one which better matches your needs and environment.
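As a quick companion to the build note above, here is a minimal local-build sketch. It assumes Go 1.23+ is installed and that the binary lands in bin/skopeo, as the system-test helper later in this diff expects; the make target and paths are assumptions, not taken from the Makefile in this change set.

# Hypothetical local (non-container) build; target/path names are assumptions.
git clone https://github.com/containers/skopeo.git && cd skopeo
make bin/skopeo            # or, more directly: go build -o bin/skopeo ./cmd/skopeo
./bin/skopeo --version     # sanity-check the freshly built binary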
@@ -1,5 +1,4 @@
//go:build openshift_shell
// +build openshift_shell

package main

@@ -8,6 +8,7 @@ import (
	"os"
	"os/exec"
	"path/filepath"
	"slices"
	"strings"
	"testing"
	"time"
@@ -153,7 +154,7 @@ func (cluster *openshiftCluster) prepareRegistryConfig(t *testing.T) {
	require.Equal(t, "", string(out))
}

// startRegistry starts the OpenShift registry with configPart on port, waits for it to be ready, and returns the process object, or terminates on failure.
// startRegistryProcess starts the OpenShift registry with configPart on port, waits for it to be ready, and returns the process object, or terminates on failure.
func (cluster *openshiftCluster) startRegistryProcess(t *testing.T, port uint16, configPath string) *exec.Cmd {
	cmd := cluster.clusterCmd(map[string]string{
		"KUBECONFIG": "openshift.local.registry/openshift-registry.kubeconfig",
@@ -253,10 +254,10 @@ func (cluster *openshiftCluster) relaxImageSignerPermissions(t *testing.T) {

// tearDown stops the cluster services and deletes (only some!) of the state.
func (cluster *openshiftCluster) tearDown(t *testing.T) {
	for i := len(cluster.processes) - 1; i >= 0; i-- {
	for _, process := range slices.Backward(cluster.processes) {
		// It’s undocumented what Kill() returns if the process has terminated,
		// so we couldn’t check just for that. This is running in a container anyway…
		_ = cluster.processes[i].Process.Kill()
		_ = process.Process.Kill()
	}
	if cluster.dockerDir != "" {
		err := os.RemoveAll(cluster.dockerDir)
@@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
//go:build unix && !linux

package main

@@ -1,3 +1,5 @@
//go:build unix

package main

import (
@@ -229,7 +231,8 @@ type byteFetch struct {
	err error
}

func runTestGetManifestAndConfig(p *proxy, img string) error {
// This exercises all the metadata fetching APIs.
func runTestMetadataAPIs(p *proxy, img string) error {
	v, err := p.callNoFd("OpenImage", []any{img})
	if err != nil {
		return err
@@ -291,6 +294,19 @@ func runTestGetManifestAndConfig(p *proxy, img string) error {
		return fmt.Errorf("No CMD or ENTRYPOINT set")
	}

	_, layerInfoBytes, err := p.callReadAllBytes("GetLayerInfoPiped", []any{imgid})
	if err != nil {
		return err
	}
	var layerInfoBytesData []interface{}
	err = json.Unmarshal(layerInfoBytes, &layerInfoBytesData)
	if err != nil {
		return err
	}
	if len(layerInfoBytesData) == 0 {
		return fmt.Errorf("expected layer info data")
	}

	// Also test this legacy interface
	_, ctrconfigBytes, err := p.callReadAllBytes("GetConfig", []any{imgid})
	if err != nil {
@@ -337,13 +353,13 @@ func (s *proxySuite) TestProxy() {
	p, err := newProxy()
	require.NoError(t, err)

	err = runTestGetManifestAndConfig(p, knownNotManifestListedImageX8664)
	err = runTestMetadataAPIs(p, knownNotManifestListedImageX8664)
	if err != nil {
		err = fmt.Errorf("Testing image %s: %v", knownNotManifestListedImageX8664, err)
	}
	assert.NoError(t, err)

	err = runTestGetManifestAndConfig(p, knownListImage)
	err = runTestMetadataAPIs(p, knownListImage)
	if err != nil {
		err = fmt.Errorf("Testing image %s: %v", knownListImage, err)
	}
20 plans/main.fmf Normal file
@@ -0,0 +1,20 @@
discover:
    how: fmf
execute:
    how: tmt
prepare:
    - when: distro == centos-stream or distro == rhel
      how: shell
      script: |
        dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-$(rpm --eval '%{?rhel}').noarch.rpm
        dnf -y config-manager --set-enabled epel
      order: 10
    - when: initiator == packit
      how: shell
      script: |
        COPR_REPO_FILE="/etc/yum.repos.d/*podman-next*.repo"
        if compgen -G $COPR_REPO_FILE > /dev/null; then
            sed -i -n '/^priority=/!p;$apriority=1' $COPR_REPO_FILE
        fi
        dnf -y upgrade --allowerasing
      order: 20
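For readers unfamiliar with tmt, a hedged example of running the plan added above from a checkout of the repository; the plan name is inferred from the file path and the flags are standard tmt options, but none of this is prescribed by the change itself.

# Assumed invocation: run every step (discover, prepare, execute, report) of the plan.
tmt run --all plan --name /plans/main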
16 rpm/gating.yaml Normal file
@@ -0,0 +1,16 @@
--- !Policy
product_versions:
  - fedora-*
decision_context:
  - bodhi_update_push_stable
  - bodhi_update_push_testing
subject_type: koji_build
rules:
  - !PassingTestCaseRule {test_case_name: fedora-ci.koji-build.tier0.functional}

--- !Policy
product_versions:
  - rhel-*
decision_context: osci_compose_gate
rules:
  - !PassingTestCaseRule {test_case_name: osci.brew-build.tier0.functional}
@ -7,15 +7,6 @@
|
||||
%global debug_package %{nil}
|
||||
%endif
|
||||
|
||||
# RHEL's default %%gobuild macro doesn't account for the BUILDTAGS variable, so we
|
||||
# set it separately here and do not depend on RHEL's go-[s]rpm-macros package
|
||||
# until that's fixed.
|
||||
# c9s bz: https://bugzilla.redhat.com/show_bug.cgi?id=2227328
|
||||
# c8s bz: https://bugzilla.redhat.com/show_bug.cgi?id=2227331
|
||||
%if %{defined rhel}
|
||||
%define gobuild(o:) go build -buildmode pie -compiler gc -tags="rpm_crashtraceback libtrust_openssl ${BUILDTAGS:-}" -ldflags "-linkmode=external -compressdwarf=false ${LDFLAGS:-} -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n') -extldflags '%__global_ldflags'" -a -v -x %{?**};
|
||||
%endif
|
||||
|
||||
%global gomodulesmode GO111MODULE=on
|
||||
|
||||
# No btrfs on RHEL
|
||||
@ -23,10 +14,15 @@
|
||||
%define build_with_btrfs 1
|
||||
%endif
|
||||
|
||||
%if %{defined rhel}
|
||||
%define fips 1
|
||||
%endif
|
||||
|
||||
# Only used in official koji builds
|
||||
# Copr builds set a separate epoch for all environments
|
||||
%if %{defined fedora}
|
||||
%define conditional_epoch 1
|
||||
%define fakeroot 1
|
||||
%else
|
||||
%define conditional_epoch 2
|
||||
%endif
|
||||
@ -77,12 +73,14 @@ Requires: containers-common >= 4:1-21
|
||||
Command line utility to inspect images and repositories directly on Docker
|
||||
registries without the need to pull them
|
||||
|
||||
# NOTE: The tests subpackage is only intended for testing and will not be supported
|
||||
# for end-users and/or customers.
|
||||
%package tests
|
||||
Summary: Tests for %{name}
|
||||
|
||||
Requires: %{name} = %{epoch}:%{version}-%{release}
|
||||
%if %{defined fedora}
|
||||
Requires: bats
|
||||
%if %{defined fakeroot}
|
||||
Requires: fakeroot
|
||||
%endif
|
||||
Requires: gnupg
|
||||
@ -122,9 +120,13 @@ export CGO_CFLAGS="$CGO_CFLAGS -m64 -mtune=generic -fcf-protection=full"
|
||||
|
||||
BASEBUILDTAGS="$(hack/libsubid_tag.sh)"
|
||||
%if %{defined build_with_btrfs}
|
||||
export BUILDTAGS="$BASEBUILDTAGS $(hack/btrfs_tag.sh) $(hack/btrfs_installed_tag.sh)"
|
||||
export BUILDTAGS="$BASEBUILDTAGS $(hack/btrfs_installed_tag.sh)"
|
||||
%else
|
||||
export BUILDTAGS="$BASEBUILDTAGS btrfs_noversion exclude_graphdriver_btrfs"
|
||||
export BUILDTAGS="$BASEBUILDTAGS exclude_graphdriver_btrfs"
|
||||
%endif
|
||||
|
||||
%if %{defined fips}
|
||||
export BUILDTAGS="$BUILDTAGS libtrust_openssl"
|
||||
%endif
|
||||
|
||||
# unset LDFLAGS earlier set from set_build_flags
|
||||
@ -146,6 +148,10 @@ cp -pav systemtest/* %{buildroot}/%{_datadir}/%{name}/test/system/
|
||||
#define license tag if not already defined
|
||||
%{!?_licensedir:%global license %doc}
|
||||
|
||||
# Include this to silence rpmlint.
|
||||
# Especially annoying if you use syntastic vim plugin.
|
||||
%check
|
||||
|
||||
%files
|
||||
%license LICENSE
|
||||
%doc README.md
|
||||
|
@@ -132,7 +132,7 @@ END_EXPECT
@test "inspect: image unknown" {
    # non existing image
    run_skopeo 2 inspect containers-storage:non-existing-tag
    expect_output --substring "identifier is not an image" \
    expect_output --substring "does not resolve to an image ID" \
                  "skopeo inspect containers-storage:010101010101"
}

@@ -10,7 +10,7 @@ SKOPEO_BINARY=${SKOPEO_BINARY:-${TEST_SOURCE_DIR}/../bin/skopeo}
SKOPEO_TIMEOUT=${SKOPEO_TIMEOUT:-300}

# Default image to run as a local registry
REGISTRY_FQIN=${SKOPEO_TEST_REGISTRY_FQIN:-quay.io/libpod/registry:2}
REGISTRY_FQIN=${SKOPEO_TEST_REGISTRY_FQIN:-quay.io/libpod/registry:2.8.2}

###############################################################################
# BEGIN setup/teardown
10 systemtest/tmt/main.fmf Normal file
@@ -0,0 +1,10 @@
require:
    - bats
    - skopeo-tests

environment:
    SKOPEO_BINARY: /usr/bin/skopeo

summary: System test
test: bash ./test.sh
duration: 60m
13 systemtest/tmt/test.sh Normal file
@@ -0,0 +1,13 @@
#!/usr/bin/env bash

set -exo pipefail

uname -r

rpm -q \
    bats \
    containers-common \
    skopeo \
    skopeo-tests \

bats /usr/share/skopeo/test/system
2 vendor/github.com/BurntSushi/toml/README.md generated vendored
@@ -3,7 +3,7 @@ reflection interface similar to Go's standard library `json` and `xml` packages.

Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).

Documentation: https://godocs.io/github.com/BurntSushi/toml
Documentation: https://pkg.go.dev/github.com/BurntSushi/toml

See the [releases page](https://github.com/BurntSushi/toml/releases) for a
changelog; this information is also in the git tag annotations (e.g. `git show
33 vendor/github.com/BurntSushi/toml/decode.go generated vendored
@ -196,6 +196,19 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error {
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// markDecodedRecursive is a helper to mark any key under the given tmap as
|
||||
// decoded, recursing as needed
|
||||
func markDecodedRecursive(md *MetaData, tmap map[string]any) {
|
||||
for key := range tmap {
|
||||
md.decoded[md.context.add(key).String()] = struct{}{}
|
||||
if tmap, ok := tmap[key].(map[string]any); ok {
|
||||
md.context = append(md.context, key)
|
||||
markDecodedRecursive(md, tmap)
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// unify performs a sort of type unification based on the structure of `rv`,
|
||||
// which is the client representation.
|
||||
//
|
||||
@ -222,6 +235,16 @@ func (md *MetaData) unify(data any, rv reflect.Value) error {
|
||||
if err != nil {
|
||||
return md.parseErr(err)
|
||||
}
|
||||
// Assume the Unmarshaler decoded everything, so mark all keys under
|
||||
// this table as decoded.
|
||||
if tmap, ok := data.(map[string]any); ok {
|
||||
markDecodedRecursive(md, tmap)
|
||||
}
|
||||
if aot, ok := data.([]map[string]any); ok {
|
||||
for _, tmap := range aot {
|
||||
markDecodedRecursive(md, tmap)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if v, ok := rvi.(encoding.TextUnmarshaler); ok {
|
||||
@ -540,12 +563,14 @@ func (md *MetaData) badtype(dst string, data any) error {
|
||||
|
||||
func (md *MetaData) parseErr(err error) error {
|
||||
k := md.context.String()
|
||||
d := string(md.data)
|
||||
return ParseError{
|
||||
LastKey: k,
|
||||
Position: md.keyInfo[k].pos,
|
||||
Line: md.keyInfo[k].pos.Line,
|
||||
Message: err.Error(),
|
||||
err: err,
|
||||
input: string(md.data),
|
||||
LastKey: k,
|
||||
Position: md.keyInfo[k].pos.withCol(d),
|
||||
Line: md.keyInfo[k].pos.Line,
|
||||
input: d,
|
||||
}
|
||||
}
|
||||
|
||||
|
46 vendor/github.com/BurntSushi/toml/encode.go generated vendored
@ -402,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
|
||||
|
||||
// Sort keys so that we have deterministic output. And write keys directly
|
||||
// underneath this key first, before writing sub-structs or sub-maps.
|
||||
var mapKeysDirect, mapKeysSub []string
|
||||
var mapKeysDirect, mapKeysSub []reflect.Value
|
||||
for _, mapKey := range rv.MapKeys() {
|
||||
k := mapKey.String()
|
||||
if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
|
||||
mapKeysSub = append(mapKeysSub, k)
|
||||
mapKeysSub = append(mapKeysSub, mapKey)
|
||||
} else {
|
||||
mapKeysDirect = append(mapKeysDirect, k)
|
||||
mapKeysDirect = append(mapKeysDirect, mapKey)
|
||||
}
|
||||
}
|
||||
|
||||
var writeMapKeys = func(mapKeys []string, trailC bool) {
|
||||
sort.Strings(mapKeys)
|
||||
writeMapKeys := func(mapKeys []reflect.Value, trailC bool) {
|
||||
sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() })
|
||||
for i, mapKey := range mapKeys {
|
||||
val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
|
||||
val := eindirect(rv.MapIndex(mapKey))
|
||||
if isNil(val) {
|
||||
continue
|
||||
}
|
||||
|
||||
if inline {
|
||||
enc.writeKeyValue(Key{mapKey}, val, true)
|
||||
enc.writeKeyValue(Key{mapKey.String()}, val, true)
|
||||
if trailC || i != len(mapKeys)-1 {
|
||||
enc.wf(", ")
|
||||
}
|
||||
} else {
|
||||
enc.encode(key.add(mapKey), val)
|
||||
enc.encode(key.add(mapKey.String()), val)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -441,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
|
||||
}
|
||||
}
|
||||
|
||||
const is32Bit = (32 << (^uint(0) >> 63)) == 32
|
||||
|
||||
func pointerTo(t reflect.Type) reflect.Type {
|
||||
if t.Kind() == reflect.Ptr {
|
||||
return pointerTo(t.Elem())
|
||||
@ -477,15 +474,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
|
||||
frv := eindirect(rv.Field(i))
|
||||
|
||||
if is32Bit {
|
||||
// Copy so it works correct on 32bit archs; not clear why this
|
||||
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
|
||||
// This also works fine on 64bit, but 32bit archs are somewhat
|
||||
// rare and this is a wee bit faster.
|
||||
copyStart := make([]int, len(start))
|
||||
copy(copyStart, start)
|
||||
start = copyStart
|
||||
}
|
||||
// Need to make a copy because ... ehm, I don't know why... I guess
|
||||
// allocating a new array can cause it to fail(?)
|
||||
//
|
||||
// Done for: https://github.com/BurntSushi/toml/issues/430
|
||||
// Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314
|
||||
copyStart := make([]int, len(start))
|
||||
copy(copyStart, start)
|
||||
start = copyStart
|
||||
|
||||
// Treat anonymous struct fields with tag names as though they are
|
||||
// not anonymous, like encoding/json does.
|
||||
@ -507,7 +503,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
}
|
||||
addFields(rt, rv, nil)
|
||||
|
||||
writeFields := func(fields [][]int) {
|
||||
writeFields := func(fields [][]int, totalFields int) {
|
||||
for _, fieldIndex := range fields {
|
||||
fieldType := rt.FieldByIndex(fieldIndex)
|
||||
fieldVal := rv.FieldByIndex(fieldIndex)
|
||||
@ -537,7 +533,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
|
||||
if inline {
|
||||
enc.writeKeyValue(Key{keyName}, fieldVal, true)
|
||||
if fieldIndex[0] != len(fields)-1 {
|
||||
if fieldIndex[0] != totalFields-1 {
|
||||
enc.wf(", ")
|
||||
}
|
||||
} else {
|
||||
@ -549,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
if inline {
|
||||
enc.wf("{")
|
||||
}
|
||||
writeFields(fieldsDirect)
|
||||
writeFields(fieldsSub)
|
||||
|
||||
l := len(fieldsDirect) + len(fieldsSub)
|
||||
writeFields(fieldsDirect, l)
|
||||
writeFields(fieldsSub, l)
|
||||
if inline {
|
||||
enc.wf("}")
|
||||
}
|
||||
|
69  vendor/github.com/BurntSushi/toml/error.go  (generated, vendored)
@ -67,21 +67,36 @@ type ParseError struct {
|
||||
// Position of an error.
|
||||
type Position struct {
|
||||
Line int // Line number, starting at 1.
|
||||
Col int // Error column, starting at 1.
|
||||
Start int // Start of error, as byte offset starting at 0.
|
||||
Len int // Lenght in bytes.
|
||||
Len int // Length of the error in bytes.
|
||||
}
|
||||
|
||||
func (p Position) withCol(tomlFile string) Position {
|
||||
var (
|
||||
pos int
|
||||
lines = strings.Split(tomlFile, "\n")
|
||||
)
|
||||
for i := range lines {
|
||||
ll := len(lines[i]) + 1 // +1 for the removed newline
|
||||
if pos+ll >= p.Start {
|
||||
p.Col = p.Start - pos + 1
|
||||
if p.Col < 1 { // Should never happen, but just in case.
|
||||
p.Col = 1
|
||||
}
|
||||
break
|
||||
}
|
||||
pos += ll
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (pe ParseError) Error() string {
|
||||
msg := pe.Message
|
||||
if msg == "" { // Error from errorf()
|
||||
msg = pe.err.Error()
|
||||
}
|
||||
|
||||
if pe.LastKey == "" {
|
||||
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
|
||||
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message)
|
||||
}
|
||||
return fmt.Sprintf("toml: line %d (last key %q): %s",
|
||||
pe.Position.Line, pe.LastKey, msg)
|
||||
pe.Position.Line, pe.LastKey, pe.Message)
|
||||
}
|
||||
|
||||
// ErrorWithPosition returns the error with detailed location context.
|
||||
@ -92,26 +107,19 @@ func (pe ParseError) ErrorWithPosition() string {
|
||||
return pe.Error()
|
||||
}
|
||||
|
||||
var (
|
||||
lines = strings.Split(pe.input, "\n")
|
||||
col = pe.column(lines)
|
||||
b = new(strings.Builder)
|
||||
)
|
||||
|
||||
msg := pe.Message
|
||||
if msg == "" {
|
||||
msg = pe.err.Error()
|
||||
}
|
||||
|
||||
// TODO: don't show control characters as literals? This may not show up
|
||||
// well everywhere.
|
||||
|
||||
var (
|
||||
lines = strings.Split(pe.input, "\n")
|
||||
b = new(strings.Builder)
|
||||
)
|
||||
if pe.Position.Len == 1 {
|
||||
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
|
||||
msg, pe.Position.Line, col+1)
|
||||
pe.Message, pe.Position.Line, pe.Position.Col)
|
||||
} else {
|
||||
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
|
||||
msg, pe.Position.Line, col, col+pe.Position.Len)
|
||||
pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1)
|
||||
}
|
||||
if pe.Position.Line > 2 {
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3]))
|
||||
@ -129,7 +137,7 @@ func (pe ParseError) ErrorWithPosition() string {
|
||||
diff := len(expanded) - len(lines[pe.Position.Line-1])
|
||||
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded)
|
||||
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len))
|
||||
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
@ -151,23 +159,6 @@ func (pe ParseError) ErrorWithUsage() string {
|
||||
return m
|
||||
}
|
||||
|
||||
func (pe ParseError) column(lines []string) int {
|
||||
var pos, col int
|
||||
for i := range lines {
|
||||
ll := len(lines[i]) + 1 // +1 for the removed newline
|
||||
if pos+ll >= pe.Position.Start {
|
||||
col = pe.Position.Start - pos
|
||||
if col < 0 { // Should never happen, but just in case.
|
||||
col = 0
|
||||
}
|
||||
break
|
||||
}
|
||||
pos += ll
|
||||
}
|
||||
|
||||
return col
|
||||
}
|
||||
|
||||
func expandTab(s string) string {
|
||||
var (
|
||||
b strings.Builder
|
||||
|
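Position.withCol above turns a byte offset (Start) into a 1-based column by walking the input line by line. A small self-contained sketch of the same arithmetic, using a hypothetical two-line document that is not part of the diff:

package main

import (
	"fmt"
	"strings"
)

// colForOffset mirrors the withCol walk: advance a running byte offset one
// line at a time and, once it passes start, the column is the remainder + 1.
func colForOffset(input string, start int) int {
	pos := 0
	for _, line := range strings.Split(input, "\n") {
		ll := len(line) + 1 // +1 for the newline removed by Split
		if pos+ll >= start {
			if col := start - pos + 1; col >= 1 {
				return col
			}
			return 1 // should not happen; mirrors the defensive clamp above
		}
		pos += ll
	}
	return 1
}

func main() {
	input := "a = 123\nb = ?"
	// Byte offset 10 is the "=" on the second line: column 3.
	fmt.Println(colForOffset(input, 10))
}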
33  vendor/github.com/BurntSushi/toml/lex.go  (generated, vendored)
@ -275,7 +275,9 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn {
|
||||
func (lx *lexer) errorf(format string, values ...any) stateFn {
|
||||
if lx.atEOF {
|
||||
pos := lx.getPos()
|
||||
pos.Line--
|
||||
if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' {
|
||||
pos.Line--
|
||||
}
|
||||
pos.Len = 1
|
||||
pos.Start = lx.pos - 1
|
||||
lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
|
||||
@ -492,6 +494,9 @@ func lexKeyEnd(lx *lexer) stateFn {
|
||||
lx.emit(itemKeyEnd)
|
||||
return lexSkip(lx, lexValue)
|
||||
default:
|
||||
if r == '\n' {
|
||||
return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r))
|
||||
}
|
||||
return lx.errorf("expected '.' or '=', but got %q instead", r)
|
||||
}
|
||||
}
|
||||
@ -560,6 +565,9 @@ func lexValue(lx *lexer) stateFn {
|
||||
if r == eof {
|
||||
return lx.errorf("unexpected EOF; expected value")
|
||||
}
|
||||
if r == '\n' {
|
||||
return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r))
|
||||
}
|
||||
return lx.errorf("expected value but found %q instead", r)
|
||||
}
|
||||
|
||||
@ -1111,7 +1119,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn {
|
||||
case 'x':
|
||||
r = lx.peek()
|
||||
if !isHex(r) {
|
||||
lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
|
||||
lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
|
||||
}
|
||||
return lexHexInteger
|
||||
}
|
||||
@ -1259,23 +1267,6 @@ func isBinary(r rune) bool { return r == '0' || r == '1' }
|
||||
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
|
||||
func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
|
||||
func isBareKeyChar(r rune, tomlNext bool) bool {
|
||||
if tomlNext {
|
||||
return (r >= 'A' && r <= 'Z') ||
|
||||
(r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') ||
|
||||
r == '_' || r == '-' ||
|
||||
r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) ||
|
||||
(r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) ||
|
||||
(r >= 0x037f && r <= 0x1fff) ||
|
||||
(r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) ||
|
||||
(r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) ||
|
||||
(r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) ||
|
||||
(r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) ||
|
||||
(r >= 0x10000 && r <= 0xeffff)
|
||||
}
|
||||
|
||||
return (r >= 'A' && r <= 'Z') ||
|
||||
(r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') ||
|
||||
r == '_' || r == '-'
|
||||
return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') || r == '_' || r == '-'
|
||||
}
|
||||
|
3  vendor/github.com/BurntSushi/toml/meta.go  (generated, vendored)
@ -135,9 +135,6 @@ func (k Key) maybeQuoted(i int) string {

// Like append(), but only increase the cap by 1.
func (k Key) add(piece string) Key {
	if cap(k) > len(k) {
		return append(k, piece)
	}
	newKey := make(Key, len(k)+1)
	copy(newKey, k)
	newKey[len(k)] = piece
17  vendor/github.com/BurntSushi/toml/parse.go  (generated, vendored)
@ -50,7 +50,6 @@ func parse(data string) (p *parser, err error) {
|
||||
// it anyway.
|
||||
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
|
||||
data = data[2:]
|
||||
//lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447
|
||||
} else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
|
||||
data = data[3:]
|
||||
}
|
||||
@ -65,7 +64,7 @@ func parse(data string) (p *parser, err error) {
|
||||
if i := strings.IndexRune(data[:ex], 0); i > -1 {
|
||||
return nil, ParseError{
|
||||
Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
|
||||
Position: Position{Line: 1, Start: i, Len: 1},
|
||||
Position: Position{Line: 1, Col: 1, Start: i, Len: 1},
|
||||
Line: 1,
|
||||
input: data,
|
||||
}
|
||||
@ -92,8 +91,9 @@ func parse(data string) (p *parser, err error) {
|
||||
|
||||
func (p *parser) panicErr(it item, err error) {
|
||||
panic(ParseError{
|
||||
Message: err.Error(),
|
||||
err: err,
|
||||
Position: it.pos,
|
||||
Position: it.pos.withCol(p.lx.input),
|
||||
Line: it.pos.Len,
|
||||
LastKey: p.current(),
|
||||
})
|
||||
@ -102,7 +102,7 @@ func (p *parser) panicErr(it item, err error) {
|
||||
func (p *parser) panicItemf(it item, format string, v ...any) {
|
||||
panic(ParseError{
|
||||
Message: fmt.Sprintf(format, v...),
|
||||
Position: it.pos,
|
||||
Position: it.pos.withCol(p.lx.input),
|
||||
Line: it.pos.Len,
|
||||
LastKey: p.current(),
|
||||
})
|
||||
@ -111,7 +111,7 @@ func (p *parser) panicItemf(it item, format string, v ...any) {
|
||||
func (p *parser) panicf(format string, v ...any) {
|
||||
panic(ParseError{
|
||||
Message: fmt.Sprintf(format, v...),
|
||||
Position: p.pos,
|
||||
Position: p.pos.withCol(p.lx.input),
|
||||
Line: p.pos.Line,
|
||||
LastKey: p.current(),
|
||||
})
|
||||
@ -123,10 +123,11 @@ func (p *parser) next() item {
|
||||
if it.typ == itemError {
|
||||
if it.err != nil {
|
||||
panic(ParseError{
|
||||
Position: it.pos,
|
||||
Message: it.err.Error(),
|
||||
err: it.err,
|
||||
Position: it.pos.withCol(p.lx.input),
|
||||
Line: it.pos.Line,
|
||||
LastKey: p.current(),
|
||||
err: it.err,
|
||||
})
|
||||
}
|
||||
|
||||
@ -527,7 +528,7 @@ func numUnderscoresOK(s string) bool {
|
||||
}
|
||||
}
|
||||
|
||||
// isHexis a superset of all the permissable characters surrounding an
|
||||
// isHex is a superset of all the permissible characters surrounding an
|
||||
// underscore.
|
||||
accept = isHex(r)
|
||||
}
|
||||
|
24  vendor/github.com/Masterminds/semver/v3/version.go  (generated, vendored)
@ -39,9 +39,11 @@ var (
|
||||
)
|
||||
|
||||
// semVerRegex is the regular expression used to parse a semantic version.
|
||||
const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
|
||||
`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
|
||||
`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
|
||||
// This is not the official regex from the semver spec. It has been modified to allow for loose handling
|
||||
// where versions like 2.1 are detected.
|
||||
const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` +
|
||||
`(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` +
|
||||
`(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?`
|
||||
|
||||
// Version represents a single semantic version.
|
||||
type Version struct {
|
||||
@ -146,8 +148,8 @@ func NewVersion(v string) (*Version, error) {
|
||||
}
|
||||
|
||||
sv := &Version{
|
||||
metadata: m[8],
|
||||
pre: m[5],
|
||||
metadata: m[5],
|
||||
pre: m[4],
|
||||
original: v,
|
||||
}
|
||||
|
||||
@ -158,7 +160,7 @@ func NewVersion(v string) (*Version, error) {
|
||||
}
|
||||
|
||||
if m[2] != "" {
|
||||
sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64)
|
||||
sv.minor, err = strconv.ParseUint(m[2], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error parsing version segment: %s", err)
|
||||
}
|
||||
@ -167,7 +169,7 @@ func NewVersion(v string) (*Version, error) {
|
||||
}
|
||||
|
||||
if m[3] != "" {
|
||||
sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64)
|
||||
sv.patch, err = strconv.ParseUint(m[3], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error parsing version segment: %s", err)
|
||||
}
|
||||
@ -612,7 +614,9 @@ func containsOnly(s string, comp string) bool {
|
||||
func validatePrerelease(p string) error {
|
||||
eparts := strings.Split(p, ".")
|
||||
for _, p := range eparts {
|
||||
if containsOnly(p, num) {
|
||||
if p == "" {
|
||||
return ErrInvalidMetadata
|
||||
} else if containsOnly(p, num) {
|
||||
if len(p) > 1 && p[0] == '0' {
|
||||
return ErrSegmentStartsZero
|
||||
}
|
||||
@ -631,7 +635,9 @@ func validatePrerelease(p string) error {
|
||||
func validateMetadata(m string) error {
|
||||
eparts := strings.Split(m, ".")
|
||||
for _, p := range eparts {
|
||||
if !containsOnly(p, allowed) {
|
||||
if p == "" {
|
||||
return ErrInvalidMetadata
|
||||
} else if !containsOnly(p, allowed) {
|
||||
return ErrInvalidMetadata
|
||||
}
|
||||
}
|
||||
|
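The regex change above keeps the library's loose parsing (versions such as 2.1) while moving the prerelease and metadata captures to groups 4 and 5. A short usage sketch against the public Masterminds/semver API; the version strings are arbitrary examples, not taken from this diff:

package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	for _, s := range []string{"2.1", "v1.2.3-beta.1+build.5"} {
		v, err := semver.NewVersion(s)
		if err != nil {
			panic(err)
		}
		// Prints major, minor, patch, prerelease, and build metadata.
		fmt.Println(v.Major(), v.Minor(), v.Patch(), v.Prerelease(), v.Metadata())
	}
}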
15  vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go  (generated, vendored)
@ -26,12 +26,13 @@ import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
@ -45,10 +46,6 @@ import (
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
// TestingController is Compression with some helper methods necessary for testing.
|
||||
type TestingController interface {
|
||||
Compression
|
||||
@ -920,9 +917,11 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
|
||||
}
|
||||
if sampleEntry == nil {
|
||||
t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target")
|
||||
return
|
||||
}
|
||||
if targetEntry == nil {
|
||||
t.Fatalf("rewrite target not found")
|
||||
return
|
||||
}
|
||||
targetEntry.Offset = sampleEntry.Offset
|
||||
},
|
||||
@ -2291,7 +2290,11 @@ var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX
|
||||
func randomContents(n int) string {
|
||||
b := make([]rune, n)
|
||||
for i := range b {
|
||||
b[i] = runes[rand.Intn(len(runes))]
|
||||
bi, err := rand.Int(rand.Reader, big.NewInt(int64(len(runes))))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
b[i] = runes[int(bi.Int64())]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
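The testutil change above swaps math/rand (and its explicit seeding) for crypto/rand when generating random test content. A standalone sketch of the same index-picking pattern, using only the standard library and not tied to this repository:

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

// randomIndex returns a uniformly random value in [0, n) using crypto/rand.
func randomIndex(n int) int {
	bi, err := rand.Int(rand.Reader, big.NewInt(int64(n)))
	if err != nil {
		panic(err) // rand.Reader is not expected to fail on supported platforms
	}
	return int(bi.Int64())
}

func main() { fmt.Println(randomIndex(62)) }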
6  vendor/github.com/containerd/typeurl/v2/README.md  (generated, vendored)
@ -18,3 +18,9 @@ As a containerd sub-project, you will find the:
* and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)

information in our [`containerd/project`](https://github.com/containerd/project) repository.

## Optional

By default, support for gogoproto is available along side the standard Google
protobuf types.
You can choose to leave gogo support out by using the `!no_gogo` build tag.
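As a rough illustration of that new paragraph (not part of the diff): the types_gogo.go file added further below carries a `//go:build !no_gogo` constraint, so a consumer that wants to leave the gogo code path out would build with the tag set, e.g. `go build -tags no_gogo ./...`.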
89  vendor/github.com/containerd/typeurl/v2/types.go  (generated, vendored)
@ -24,7 +24,6 @@ import (
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
gogoproto "github.com/gogo/protobuf/proto"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
@ -33,8 +32,16 @@ import (
|
||||
var (
|
||||
mu sync.RWMutex
|
||||
registry = make(map[reflect.Type]string)
|
||||
handlers []handler
|
||||
)
|
||||
|
||||
type handler interface {
|
||||
Marshaller(interface{}) func() ([]byte, error)
|
||||
Unmarshaller(interface{}) func([]byte) error
|
||||
TypeURL(interface{}) string
|
||||
GetType(url string) (reflect.Type, bool)
|
||||
}
|
||||
|
||||
// Definitions of common error types used throughout typeurl.
|
||||
//
|
||||
// These error types are used with errors.Wrap and errors.Wrapf to add context
|
||||
@ -112,9 +119,12 @@ func TypeURL(v interface{}) (string, error) {
|
||||
switch t := v.(type) {
|
||||
case proto.Message:
|
||||
return string(t.ProtoReflect().Descriptor().FullName()), nil
|
||||
case gogoproto.Message:
|
||||
return gogoproto.MessageName(t), nil
|
||||
default:
|
||||
for _, h := range handlers {
|
||||
if u := h.TypeURL(v); u != "" {
|
||||
return u, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("type %s: %w", reflect.TypeOf(v), ErrNotFound)
|
||||
}
|
||||
}
|
||||
@ -149,12 +159,19 @@ func MarshalAny(v interface{}) (Any, error) {
|
||||
marshal = func(v interface{}) ([]byte, error) {
|
||||
return proto.Marshal(t)
|
||||
}
|
||||
case gogoproto.Message:
|
||||
marshal = func(v interface{}) ([]byte, error) {
|
||||
return gogoproto.Marshal(t)
|
||||
}
|
||||
default:
|
||||
marshal = json.Marshal
|
||||
for _, h := range handlers {
|
||||
if m := h.Marshaller(v); m != nil {
|
||||
marshal = func(v interface{}) ([]byte, error) {
|
||||
return m()
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if marshal == nil {
|
||||
marshal = json.Marshal
|
||||
}
|
||||
}
|
||||
|
||||
url, err := TypeURL(v)
|
||||
@ -223,13 +240,13 @@ func MarshalAnyToProto(from interface{}) (*anypb.Any, error) {
|
||||
}
|
||||
|
||||
func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) {
|
||||
t, err := getTypeByUrl(typeURL)
|
||||
t, isProto, err := getTypeByUrl(typeURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if v == nil {
|
||||
v = reflect.New(t.t).Interface()
|
||||
v = reflect.New(t).Interface()
|
||||
} else {
|
||||
// Validate interface type provided by client
|
||||
vURL, err := TypeURL(v)
|
||||
@ -241,51 +258,45 @@ func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error)
|
||||
}
|
||||
}
|
||||
|
||||
if t.isProto {
|
||||
switch t := v.(type) {
|
||||
case proto.Message:
|
||||
err = proto.Unmarshal(value, t)
|
||||
case gogoproto.Message:
|
||||
err = gogoproto.Unmarshal(value, t)
|
||||
if isProto {
|
||||
pm, ok := v.(proto.Message)
|
||||
if ok {
|
||||
return v, proto.Unmarshal(value, pm)
|
||||
}
|
||||
|
||||
for _, h := range handlers {
|
||||
if unmarshal := h.Unmarshaller(v); unmarshal != nil {
|
||||
return v, unmarshal(value)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
err = json.Unmarshal(value, v)
|
||||
}
|
||||
|
||||
return v, err
|
||||
// fallback to json unmarshaller
|
||||
return v, json.Unmarshal(value, v)
|
||||
}
|
||||
|
||||
type urlType struct {
|
||||
t reflect.Type
|
||||
isProto bool
|
||||
}
|
||||
|
||||
func getTypeByUrl(url string) (urlType, error) {
|
||||
func getTypeByUrl(url string) (_ reflect.Type, isProto bool, _ error) {
|
||||
mu.RLock()
|
||||
for t, u := range registry {
|
||||
if u == url {
|
||||
mu.RUnlock()
|
||||
return urlType{
|
||||
t: t,
|
||||
}, nil
|
||||
return t, false, nil
|
||||
}
|
||||
}
|
||||
mu.RUnlock()
|
||||
// fallback to proto registry
|
||||
t := gogoproto.MessageType(url)
|
||||
if t != nil {
|
||||
return urlType{
|
||||
// get the underlying Elem because proto returns a pointer to the type
|
||||
t: t.Elem(),
|
||||
isProto: true,
|
||||
}, nil
|
||||
}
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
|
||||
if err != nil {
|
||||
return urlType{}, fmt.Errorf("type with url %s: %w", url, ErrNotFound)
|
||||
if errors.Is(err, protoregistry.NotFound) {
|
||||
for _, h := range handlers {
|
||||
if t, isProto := h.GetType(url); t != nil {
|
||||
return t, isProto, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, false, fmt.Errorf("type with url %s: %w", url, ErrNotFound)
|
||||
}
|
||||
empty := mt.New().Interface()
|
||||
return urlType{t: reflect.TypeOf(empty).Elem(), isProto: true}, nil
|
||||
return reflect.TypeOf(empty).Elem(), true, nil
|
||||
}
|
||||
|
||||
func tryDereference(v interface{}) reflect.Type {
|
||||
|
68  vendor/github.com/containerd/typeurl/v2/types_gogo.go  (generated, vendored, new file)
@ -0,0 +1,68 @@
|
||||
//go:build !no_gogo
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package typeurl
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
gogoproto "github.com/gogo/protobuf/proto"
|
||||
)
|
||||
|
||||
func init() {
|
||||
handlers = append(handlers, gogoHandler{})
|
||||
}
|
||||
|
||||
type gogoHandler struct{}
|
||||
|
||||
func (gogoHandler) Marshaller(v interface{}) func() ([]byte, error) {
|
||||
pm, ok := v.(gogoproto.Message)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return func() ([]byte, error) {
|
||||
return gogoproto.Marshal(pm)
|
||||
}
|
||||
}
|
||||
|
||||
func (gogoHandler) Unmarshaller(v interface{}) func([]byte) error {
|
||||
pm, ok := v.(gogoproto.Message)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
return func(dt []byte) error {
|
||||
return gogoproto.Unmarshal(dt, pm)
|
||||
}
|
||||
}
|
||||
|
||||
func (gogoHandler) TypeURL(v interface{}) string {
|
||||
pm, ok := v.(gogoproto.Message)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return gogoproto.MessageName(pm)
|
||||
}
|
||||
|
||||
func (gogoHandler) GetType(url string) (reflect.Type, bool) {
|
||||
t := gogoproto.MessageType(url)
|
||||
if t == nil {
|
||||
return nil, false
|
||||
}
|
||||
return t.Elem(), true
|
||||
}
|
24  vendor/github.com/containers/image/v5/copy/single.go  (generated, vendored)
@ -109,7 +109,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
|
||||
}
|
||||
}
|
||||
|
||||
if err := checkImageDestinationForCurrentRuntime(ctx, c.options.DestinationCtx, src, c.dest); err != nil {
|
||||
if err := prepareImageConfigForDest(ctx, c.options.DestinationCtx, src, c.dest); err != nil {
|
||||
return copySingleImageResult{}, err
|
||||
}
|
||||
|
||||
@ -316,12 +316,15 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary.
|
||||
func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
|
||||
// prepareImageConfigForDest enforces dest.MustMatchRuntimeOS and handles dest.NoteOriginalOCIConfig, if necessary.
|
||||
func prepareImageConfigForDest(ctx context.Context, sys *types.SystemContext, src types.Image, dest private.ImageDestination) error {
|
||||
ociConfig, configErr := src.OCIConfig(ctx)
|
||||
// Do not fail on configErr here, this might be an artifact
|
||||
// and maybe nothing needs this to be a container image and to process the config.
|
||||
|
||||
if dest.MustMatchRuntimeOS() {
|
||||
c, err := src.OCIConfig(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing image configuration: %w", err)
|
||||
if configErr != nil {
|
||||
return fmt.Errorf("parsing image configuration: %w", configErr)
|
||||
}
|
||||
wantedPlatforms := platform.WantedPlatforms(sys)
|
||||
|
||||
@ -331,7 +334,7 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
|
||||
// For a transitional period, this might trigger warnings because the Variant
|
||||
// field was added to OCI config only recently. If this turns out to be too noisy,
|
||||
// revert this check to only look for (OS, Architecture).
|
||||
if platform.MatchesPlatform(c.Platform, wantedPlatform) {
|
||||
if platform.MatchesPlatform(ociConfig.Platform, wantedPlatform) {
|
||||
match = true
|
||||
break
|
||||
}
|
||||
@ -339,9 +342,14 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
|
||||
}
|
||||
if !match {
|
||||
logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q",
|
||||
c.OS, c.Architecture, c.Variant, strings.Join(options.list, ", "))
|
||||
ociConfig.OS, ociConfig.Architecture, ociConfig.Variant, strings.Join(options.list, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
if err := dest.NoteOriginalOCIConfig(ociConfig, configErr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
1  vendor/github.com/containers/image/v5/directory/directory_dest.go  (generated, vendored)
@ -29,6 +29,7 @@ var ErrNotContainerImageDir = errors.New("not a containers image directory, don'
type dirImageDestination struct {
	impl.Compat
	impl.PropertyMethodsInitialize
	stubs.IgnoresOriginalOCIConfig
	stubs.NoPutBlobPartialInitialize
	stubs.AlwaysSupportsSignatures
12  vendor/github.com/containers/image/v5/docker/daemon/client.go  (generated, vendored)
@ -3,6 +3,7 @@ package daemon
|
||||
import (
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/v5/types"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
@ -47,6 +48,7 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
|
||||
}
|
||||
switch serverURL.Scheme {
|
||||
case "unix": // Nothing
|
||||
case "npipe": // Nothing
|
||||
case "http":
|
||||
hc := httpConfig()
|
||||
opts = append(opts, dockerclient.WithHTTPClient(hc))
|
||||
@ -82,6 +84,11 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
TLSClientConfig: tlsc,
|
||||
// In general we want to follow docker/daemon/client.defaultHTTPClient , as long as it doesn’t affect compatibility.
|
||||
// These idle connection limits really only apply to long-running clients, which is not our case here;
|
||||
// we include the same values purely for symmetry.
|
||||
MaxIdleConns: 6,
|
||||
IdleConnTimeout: 30 * time.Second,
|
||||
},
|
||||
CheckRedirect: dockerclient.CheckRedirect,
|
||||
}, nil
|
||||
@ -92,6 +99,11 @@ func httpConfig() *http.Client {
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
TLSClientConfig: nil,
|
||||
// In general we want to follow docker/daemon/client.defaultHTTPClient , as long as it doesn’t affect compatibility.
|
||||
// These idle connection limits really only apply to long-running clients, which is not our case here;
|
||||
// we include the same values purely for symmetry.
|
||||
MaxIdleConns: 6,
|
||||
IdleConnTimeout: 30 * time.Second,
|
||||
},
|
||||
CheckRedirect: dockerclient.CheckRedirect,
|
||||
}
|
||||
|
8  vendor/github.com/containers/image/v5/docker/distribution_error.go  (generated, vendored)
@ -24,7 +24,6 @@ import (
|
||||
"slices"
|
||||
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
|
||||
)
|
||||
|
||||
// errNoErrorsInBody is returned when an HTTP response body parses to an empty
|
||||
@ -114,10 +113,11 @@ func mergeErrors(err1, err2 error) error {
|
||||
// UnexpectedHTTPStatusError returned for response code outside of expected
|
||||
// range.
|
||||
func handleErrorResponse(resp *http.Response) error {
|
||||
if resp.StatusCode >= 400 && resp.StatusCode < 500 {
|
||||
switch {
|
||||
case resp.StatusCode == http.StatusUnauthorized:
|
||||
// Check for OAuth errors within the `WWW-Authenticate` header first
|
||||
// See https://tools.ietf.org/html/rfc6750#section-3
|
||||
for _, c := range dockerChallenge.ResponseChallenges(resp) {
|
||||
for _, c := range parseAuthHeader(resp.Header) {
|
||||
if c.Scheme == "bearer" {
|
||||
var err errcode.Error
|
||||
// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
|
||||
@ -138,6 +138,8 @@ func handleErrorResponse(resp *http.Response) error {
|
||||
return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
|
||||
}
|
||||
}
|
||||
fallthrough
|
||||
case resp.StatusCode >= 400 && resp.StatusCode < 500:
|
||||
err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
|
||||
if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
|
||||
return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
|
||||
|
13  vendor/github.com/containers/image/v5/docker/docker_client.go  (generated, vendored)
@ -1056,6 +1056,15 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty
|
||||
func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) {
|
||||
// Note that this copies all kinds of attachments: attestations, and whatever else is there,
|
||||
// not just signatures. We leave the signature consumers to decide based on the MIME type.
|
||||
|
||||
if err := desc.Digest.Validate(); err != nil { // .Algorithm() might panic without this check
|
||||
return nil, fmt.Errorf("invalid digest %q: %w", desc.Digest.String(), err)
|
||||
}
|
||||
digestAlgorithm := desc.Digest.Algorithm()
|
||||
if !digestAlgorithm.Available() {
|
||||
return nil, fmt.Errorf("invalid digest %q: unsupported digest algorithm %q", desc.Digest.String(), digestAlgorithm.String())
|
||||
}
|
||||
|
||||
reader, _, err := c.getBlob(ctx, ref, manifest.BlobInfoFromOCI1Descriptor(desc), cache)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -1065,6 +1074,10 @@ func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerR
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err)
|
||||
}
|
||||
actualDigest := digestAlgorithm.FromBytes(payload)
|
||||
if actualDigest != desc.Digest {
|
||||
return nil, fmt.Errorf("digest mismatch, expected %q, got %q", desc.Digest.String(), actualDigest.String())
|
||||
}
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
|
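The docker_client.go hunks above add digest validation before the digest is used and a digest check on the fetched payload. A self-contained sketch of that verify-after-read pattern, assuming only the opencontainers/go-digest API; the payload here is a stand-in, not data from this diff:

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// verify checks that the expected digest is well formed, uses a supported
// algorithm, and matches the bytes that were actually received.
func verify(payload []byte, expected digest.Digest) error {
	if err := expected.Validate(); err != nil {
		return fmt.Errorf("invalid digest %q: %w", expected, err)
	}
	if !expected.Algorithm().Available() {
		return fmt.Errorf("unsupported digest algorithm %q", expected.Algorithm())
	}
	if actual := expected.Algorithm().FromBytes(payload); actual != expected {
		return fmt.Errorf("digest mismatch, expected %q, got %q", expected, actual)
	}
	return nil
}

func main() {
	fmt.Println(verify([]byte("hello"), digest.FromBytes([]byte("hello"))))
}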
1  vendor/github.com/containers/image/v5/docker/docker_image_dest.go  (generated, vendored)
@ -41,6 +41,7 @@ import (
type dockerImageDestination struct {
	impl.Compat
	impl.PropertyMethodsInitialize
	stubs.IgnoresOriginalOCIConfig
	stubs.NoPutBlobPartialInitialize

	ref dockerReference
4  vendor/github.com/containers/image/v5/docker/docker_image_src.go  (generated, vendored)
@ -340,6 +340,10 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
			}
			return
		}
		if parts >= len(chunks) {
			errs <- errors.New("too many parts returned by the server")
			break
		}
		s := signalCloseReader{
			closed: make(chan struct{}),
			stream: p,
1  vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go  (generated, vendored)
@ -24,6 +24,7 @@ import (
type Destination struct {
	impl.Compat
	impl.PropertyMethodsInitialize
	stubs.IgnoresOriginalOCIConfig
	stubs.NoPutBlobPartialInitialize
	stubs.NoSignaturesInitialize
6  vendor/github.com/containers/image/v5/docker/registries_d.go  (generated, vendored)
@ -3,6 +3,7 @@ package docker
import (
	"errors"
	"fmt"
	"io/fs"
	"net/url"
	"os"
	"path"
@ -129,6 +130,11 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
		configPath := filepath.Join(dirPath, configName)
		configBytes, err := os.ReadFile(configPath)
		if err != nil {
			if errors.Is(err, fs.ErrNotExist) {
				// file must have been removed between the directory listing
				// and the open call, ignore that as it is a expected race
				continue
			}
			return nil, err
		}
16  vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go  (generated, vendored, new file)
@ -0,0 +1,16 @@
package stubs

import (
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// IgnoresOriginalOCIConfig implements NoteOriginalOCIConfig() that does nothing.
type IgnoresOriginalOCIConfig struct{}

// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
// The destination can use it in its TryReusingBlob/PutBlob implementations
// (otherwise it only obtains the final config after all layers are written).
func (stub IgnoresOriginalOCIConfig) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
	return nil
}
1  vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go  (generated, vendored)
@ -14,6 +14,7 @@ import (
// wrapped provides the private.ImageDestination operations
// for a destination that only implements types.ImageDestination
type wrapped struct {
	stubs.IgnoresOriginalOCIConfig
	stubs.NoPutBlobPartialInitialize

	types.ImageDestination
32  vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go  (generated, vendored)
@ -74,20 +74,20 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat
|
||||
|
||||
// UpdateInstances updates the sizes, digests, and media types of the manifests
|
||||
// which the list catalogs.
|
||||
func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
|
||||
func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
|
||||
editInstances := []ListEdit{}
|
||||
for i, instance := range updates {
|
||||
editInstances = append(editInstances, ListEdit{
|
||||
UpdateOldDigest: index.Manifests[i].Digest,
|
||||
UpdateOldDigest: list.Manifests[i].Digest,
|
||||
UpdateDigest: instance.Digest,
|
||||
UpdateSize: instance.Size,
|
||||
UpdateMediaType: instance.MediaType,
|
||||
ListOperation: ListOpUpdate})
|
||||
}
|
||||
return index.editInstances(editInstances)
|
||||
return list.editInstances(editInstances)
|
||||
}
|
||||
|
||||
func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
|
||||
func (list *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
|
||||
addedEntries := []Schema2ManifestDescriptor{}
|
||||
for i, editInstance := range editInstances {
|
||||
switch editInstance.ListOperation {
|
||||
@ -98,21 +98,21 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
|
||||
if err := editInstance.UpdateDigest.Validate(); err != nil {
|
||||
return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err)
|
||||
}
|
||||
targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool {
|
||||
targetIndex := slices.IndexFunc(list.Manifests, func(m Schema2ManifestDescriptor) bool {
|
||||
return m.Digest == editInstance.UpdateOldDigest
|
||||
})
|
||||
if targetIndex == -1 {
|
||||
return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest)
|
||||
}
|
||||
index.Manifests[targetIndex].Digest = editInstance.UpdateDigest
|
||||
list.Manifests[targetIndex].Digest = editInstance.UpdateDigest
|
||||
if editInstance.UpdateSize < 0 {
|
||||
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize)
|
||||
}
|
||||
index.Manifests[targetIndex].Size = editInstance.UpdateSize
|
||||
list.Manifests[targetIndex].Size = editInstance.UpdateSize
|
||||
if editInstance.UpdateMediaType == "" {
|
||||
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType)
|
||||
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), list.Manifests[i].MediaType)
|
||||
}
|
||||
index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
|
||||
list.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
|
||||
case ListOpAdd:
|
||||
if editInstance.AddPlatform == nil {
|
||||
// Should we create a struct with empty fields instead?
|
||||
@ -135,13 +135,13 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
|
||||
if len(addedEntries) != 0 {
|
||||
// slices.Clone() here to ensure a private backing array;
|
||||
// an external caller could have manually created Schema2ListPublic with a slice with extra capacity.
|
||||
index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
|
||||
list.Manifests = append(slices.Clone(list.Manifests), addedEntries...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (index *Schema2List) EditInstances(editInstances []ListEdit) error {
|
||||
return index.editInstances(editInstances)
|
||||
func (list *Schema2List) EditInstances(editInstances []ListEdit) error {
|
||||
return list.editInstances(editInstances)
|
||||
}
|
||||
|
||||
func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
|
||||
@ -280,12 +280,12 @@ func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List {
|
||||
return &Schema2List{*public}
|
||||
}
|
||||
|
||||
func (index *Schema2List) CloneInternal() List {
|
||||
return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic))
|
||||
func (list *Schema2List) CloneInternal() List {
|
||||
return schema2ListFromPublic(Schema2ListPublicClone(&list.Schema2ListPublic))
|
||||
}
|
||||
|
||||
func (index *Schema2List) Clone() ListPublic {
|
||||
return index.CloneInternal()
|
||||
func (list *Schema2List) Clone() ListPublic {
|
||||
return list.CloneInternal()
|
||||
}
|
||||
|
||||
// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
|
||||
|
7  vendor/github.com/containers/image/v5/internal/private/private.go  (generated, vendored)
@ -10,6 +10,7 @@ import (
	compression "github.com/containers/image/v5/pkg/compression/types"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// ImageSourceInternalOnly is the part of private.ImageSource that is not
@ -41,6 +42,12 @@ type ImageDestinationInternalOnly interface {
	// FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures
	// on unsupported formats.

	// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
	// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
	// The destination can use it in its TryReusingBlob/PutBlob implementations
	// (otherwise it only obtains the final config after all layers are written).
	NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error

	// PutBlobWithOptions writes contents of stream and returns data representing the result.
	// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
	// inputInfo.Size is the expected length of stream, if known.
22  vendor/github.com/containers/image/v5/internal/reflink/reflink_linux.go  (generated, vendored, new file)
@ -0,0 +1,22 @@
//go:build linux

package reflink

import (
	"io"
	"os"

	"golang.org/x/sys/unix"
)

// LinkOrCopy attempts to reflink the source to the destination fd.
// If reflinking fails or is unsupported, it falls back to io.Copy().
func LinkOrCopy(src, dst *os.File) error {
	_, _, errno := unix.Syscall(unix.SYS_IOCTL, dst.Fd(), unix.FICLONE, src.Fd())
	if errno == 0 {
		return nil
	}

	_, err := io.Copy(dst, src)
	return err
}
15  vendor/github.com/containers/image/v5/internal/reflink/reflink_unsupported.go  (generated, vendored, new file)
@ -0,0 +1,15 @@
//go:build !linux

package reflink

import (
	"io"
	"os"
)

// LinkOrCopy attempts to reflink the source to the destination fd.
// If reflinking fails or is unsupported, it falls back to io.Copy().
func LinkOrCopy(src, dst *os.File) error {
	_, err := io.Copy(dst, src)
	return err
}
9  vendor/github.com/containers/image/v5/oci/archive/oci_dest.go  (generated, vendored)
@ -14,6 +14,7 @@ import (
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@ -103,6 +104,14 @@ func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool {
|
||||
return d.unpackedDest.SupportsPutBlobPartial()
|
||||
}
|
||||
|
||||
// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
|
||||
// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
|
||||
// The destination can use it in its TryReusingBlob/PutBlob implementations
|
||||
// (otherwise it only obtains the final config after all layers are written).
|
||||
func (d *ociArchiveImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
|
||||
return d.unpackedDest.NoteOriginalOCIConfig(ociConfig, configErr)
|
||||
}
|
||||
|
||||
// PutBlobWithOptions writes contents of stream and returns data representing the result.
|
||||
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
|
||||
// inputInfo.Size is the expected length of stream, if known.
|
||||
|
31  vendor/github.com/containers/image/v5/oci/internal/oci_util.go  (generated, vendored)
@ -6,6 +6,7 @@ import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@ -98,7 +99,7 @@ func ValidateScope(scope string) error {
|
||||
}
|
||||
|
||||
func validateScopeWindows(scope string) error {
|
||||
matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope))
|
||||
matched, _ := regexp.MatchString(`^[a-zA-Z]:\\`, scope)
|
||||
if !matched {
|
||||
return fmt.Errorf("Invalid scope '%s'. Must be an absolute path", scope)
|
||||
}
|
||||
@ -119,3 +120,31 @@ func validateScopeNonWindows(scope string) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseOCIReferenceName parses the image from the oci reference.
|
||||
func parseOCIReferenceName(image string) (img string, index int, err error) {
|
||||
index = -1
|
||||
if strings.HasPrefix(image, "@") {
|
||||
idx, err := strconv.Atoi(image[1:])
|
||||
if err != nil {
|
||||
return "", index, fmt.Errorf("Invalid source index @%s: not an integer: %w", image[1:], err)
|
||||
}
|
||||
if idx < 0 {
|
||||
return "", index, fmt.Errorf("Invalid source index @%d: must not be negative", idx)
|
||||
}
|
||||
index = idx
|
||||
} else {
|
||||
img = image
|
||||
}
|
||||
return img, index, nil
|
||||
}
|
||||
|
||||
// ParseReferenceIntoElements splits the oci reference into location, image name and source index if exists
|
||||
func ParseReferenceIntoElements(reference string) (string, string, int, error) {
|
||||
dir, image := SplitPathAndImage(reference)
|
||||
image, index, err := parseOCIReferenceName(image)
|
||||
if err != nil {
|
||||
return "", "", -1, err
|
||||
}
|
||||
return dir, image, index, nil
|
||||
}
|
||||
|
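The parsing helpers above, together with the oci/layout transport changes further below, allow an oci: reference to select a manifest by zero-based source index ("dir:@N") instead of by image name. A hedged usage sketch of that form; the layout path is hypothetical:

package main

import (
	"fmt"

	"github.com/containers/image/v5/oci/layout"
)

func main() {
	// Select the second manifest in /tmp/layout/index.json by position.
	ref, err := layout.ParseReference("/tmp/layout:@1")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.StringWithinTransport()) // "/tmp/layout:@1"

	// Equivalent programmatic constructor added in this change.
	ref2, err := layout.NewIndexReference("/tmp/layout", 1)
	if err != nil {
		panic(err)
	}
	fmt.Println(ref2.StringWithinTransport())
}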
97  vendor/github.com/containers/image/v5/oci/layout/oci_dest.go  (generated, vendored)
@ -17,6 +17,7 @@ import (
|
||||
"github.com/containers/image/v5/internal/manifest"
|
||||
"github.com/containers/image/v5/internal/private"
|
||||
"github.com/containers/image/v5/internal/putblobdigest"
|
||||
"github.com/containers/image/v5/internal/reflink"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
@ -27,6 +28,7 @@ import (
|
||||
type ociImageDestination struct {
|
||||
impl.Compat
|
||||
impl.PropertyMethodsInitialize
|
||||
stubs.IgnoresOriginalOCIConfig
|
||||
stubs.NoPutBlobPartialInitialize
|
||||
stubs.NoSignaturesInitialize
|
||||
|
||||
@ -37,6 +39,9 @@ type ociImageDestination struct {
|
||||
|
||||
// newImageDestination returns an ImageDestination for writing to an existing directory.
|
||||
func newImageDestination(sys *types.SystemContext, ref ociReference) (private.ImageDestination, error) {
|
||||
if ref.sourceIndex != -1 {
|
||||
return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
|
||||
}
|
||||
var index *imgspecv1.Index
|
||||
if indexExists(ref) {
|
||||
var err error
|
||||
@ -137,9 +142,21 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
|
||||
if inputInfo.Size != -1 && size != inputInfo.Size {
|
||||
return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
|
||||
}
|
||||
if err := blobFile.Sync(); err != nil {
|
||||
|
||||
if err := d.blobFileSyncAndRename(blobFile, blobDigest, &explicitClosed); err != nil {
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
succeeded = true
|
||||
return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
|
||||
}
|
||||
|
||||
// blobFileSyncAndRename syncs the specified blobFile on the filesystem and renames it to the
|
||||
// specific blob path determined by the blobDigest. The closed pointer indicates to the caller
|
||||
// whether blobFile has been closed or not.
|
||||
func (d *ociImageDestination) blobFileSyncAndRename(blobFile *os.File, blobDigest digest.Digest, closed *bool) error {
|
||||
if err := blobFile.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
|
||||
// On Windows, the “permissions of newly created files” argument to syscall.Open is
|
||||
@ -147,26 +164,27 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
|
||||
// always fails on Windows.
|
||||
if runtime.GOOS != "windows" {
|
||||
if err := blobFile.Chmod(0644); err != nil {
|
||||
return private.UploadedBlob{}, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir)
|
||||
if err != nil {
|
||||
return private.UploadedBlob{}, err
|
||||
return err
|
||||
}
|
||||
if err := ensureParentDirectoryExists(blobPath); err != nil {
|
||||
return private.UploadedBlob{}, err
|
||||
return err
|
||||
}
|
||||
|
||||
// need to explicitly close the file, since a rename won't otherwise not work on Windows
|
||||
// need to explicitly close the file, since a rename won't otherwise work on Windows
|
||||
blobFile.Close()
|
||||
explicitClosed = true
|
||||
*closed = true
|
||||
|
||||
if err := os.Rename(blobFile.Name(), blobPath); err != nil {
|
||||
return private.UploadedBlob{}, err
|
||||
return err
|
||||
}
|
||||
succeeded = true
|
||||
return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
|
||||
@ -299,6 +317,67 @@ func (d *ociImageDestination) CommitWithOptions(ctx context.Context, options pri
|
||||
return os.WriteFile(d.ref.indexPath(), indexJSON, 0644)
|
||||
}
|
||||
|
||||
// PutBlobFromLocalFileOption is unused but may receive functionality in the future.
|
||||
type PutBlobFromLocalFileOption struct{}
|
||||
|
||||
// PutBlobFromLocalFile arranges the data from path to be used as blob with digest.
|
||||
// It computes, and returns, the digest and size of the used file.
|
||||
//
|
||||
// This function can be used instead of dest.PutBlob() where the ImageDestination requires PutBlob() to be called.
|
||||
func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file string, options ...PutBlobFromLocalFileOption) (digest.Digest, int64, error) {
|
||||
d, ok := dest.(*ociImageDestination)
|
||||
if !ok {
|
||||
return "", -1, errors.New("internal error: PutBlobFromLocalFile called with a non-oci: destination")
|
||||
}
|
||||
|
||||
succeeded := false
|
||||
blobFileClosed := false
|
||||
blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
|
||||
if err != nil {
|
||||
return "", -1, err
|
||||
}
|
||||
defer func() {
|
||||
if !blobFileClosed {
|
||||
blobFile.Close()
|
||||
}
|
||||
if !succeeded {
|
||||
os.Remove(blobFile.Name())
|
||||
}
|
||||
}()
|
||||
|
||||
srcFile, err := os.Open(file)
|
||||
if err != nil {
|
||||
return "", -1, err
|
||||
}
|
||||
defer srcFile.Close()
|
||||
|
||||
err = reflink.LinkOrCopy(srcFile, blobFile)
|
||||
if err != nil {
|
||||
return "", -1, err
|
||||
}
|
||||
|
||||
_, err = blobFile.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return "", -1, err
|
||||
}
|
||||
blobDigest, err := digest.FromReader(blobFile)
|
||||
if err != nil {
|
||||
return "", -1, err
|
||||
}
|
||||
|
||||
fileInfo, err := blobFile.Stat()
|
||||
if err != nil {
|
||||
return "", -1, err
|
||||
}
|
||||
|
||||
if err := d.blobFileSyncAndRename(blobFile, blobDigest, &blobFileClosed); err != nil {
|
||||
return "", -1, err
|
||||
}
|
||||
|
||||
succeeded = true
|
||||
return blobDigest, fileInfo.Size(), nil
|
||||
}
|
||||
|
||||
func ensureDirectoryExists(path string) error {
|
||||
if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) {
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
|
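A rough usage sketch of the new layout.PutBlobFromLocalFile helper shown above: open an oci: layout destination, then attach a local file as a blob and get its digest and size back. The layout path, image tag, and file name are hypothetical, and error handling is kept minimal:

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/oci/layout"
)

func main() {
	ctx := context.Background()
	ref, err := layout.NewReference("/tmp/layout", "example:latest")
	if err != nil {
		panic(err)
	}
	dest, err := ref.NewImageDestination(ctx, nil)
	if err != nil {
		panic(err)
	}
	defer dest.Close()

	// The file's contents become a blob in the layout's blobs/ directory.
	dgst, size, err := layout.PutBlobFromLocalFile(ctx, dest, "/tmp/layer.tar.gz")
	if err != nil {
		panic(err)
	}
	fmt.Println(dgst, size)
}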
75  vendor/github.com/containers/image/v5/oci/layout/oci_transport.go  (generated, vendored)
@ -61,22 +61,31 @@ type ociReference struct {
|
||||
// (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
|
||||
dir string // As specified by the user. May be relative, contain symlinks, etc.
|
||||
resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
|
||||
// If image=="", it means the "only image" in the index.json is used in the case it is a source
|
||||
// for destinations, the image name annotation "image.ref.name" is not added to the index.json
|
||||
// If image=="" && sourceIndex==-1, it means the "only image" in the index.json is used in the case it is a source
|
||||
// for destinations, the image name annotation "image.ref.name" is not added to the index.json.
|
||||
//
|
||||
// Must not be set if sourceIndex is set (the value is not -1).
|
||||
image string
|
||||
// If not -1, a zero-based index of an image in the manifest index. Valid only for sources.
|
||||
// Must not be set if image is set.
|
||||
sourceIndex int
|
||||
}
|
||||
|
||||
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
|
||||
func ParseReference(reference string) (types.ImageReference, error) {
|
||||
dir, image := internal.SplitPathAndImage(reference)
|
||||
return NewReference(dir, image)
|
||||
dir, image, index, err := internal.ParseReferenceIntoElements(reference)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newReference(dir, image, index)
|
||||
}
|
||||
|
||||
// NewReference returns an OCI reference for a directory and a image.
|
||||
// newReference returns an OCI reference for a directory, and an image name annotation or sourceIndex.
|
||||
//
|
||||
// If sourceIndex==-1, the index will not be valid to point out the source image, only image will be used.
|
||||
// We do not expose an API supplying the resolvedDir; we could, but recomputing it
|
||||
// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
|
||||
func NewReference(dir, image string) (types.ImageReference, error) {
|
||||
func newReference(dir, image string, sourceIndex int) (types.ImageReference, error) {
|
||||
resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -90,7 +99,26 @@ func NewReference(dir, image string) (types.ImageReference, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil
|
||||
if sourceIndex != -1 && sourceIndex < 0 {
|
||||
return nil, fmt.Errorf("Invalid oci: layout reference: index @%d must not be negative", sourceIndex)
|
||||
}
|
||||
if sourceIndex != -1 && image != "" {
|
||||
return nil, fmt.Errorf("Invalid oci: layout reference: cannot use both an image %s and a source index @%d", image, sourceIndex)
|
||||
}
|
||||
return ociReference{dir: dir, resolvedDir: resolved, image: image, sourceIndex: sourceIndex}, nil
|
||||
}
|
||||
|
||||
// NewIndexReference returns an OCI reference for a path and a zero-based source manifest index.
|
||||
func NewIndexReference(dir string, sourceIndex int) (types.ImageReference, error) {
|
||||
return newReference(dir, "", sourceIndex)
|
||||
}
|
||||
|
||||
// NewReference returns an OCI reference for a directory and an image.
|
||||
//
|
||||
// We do not expose an API supplying the resolvedDir; we could, but recomputing it
|
||||
// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
|
||||
func NewReference(dir, image string) (types.ImageReference, error) {
|
||||
return newReference(dir, image, -1)
|
||||
}
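Taken together, the constructors above give two ways to address an image inside a layout directory: by its "image.ref.name" annotation, or by a zero-based index written as @N. A hedged usage sketch, assuming the layout directory already exists; the path and tag are illustrative:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/oci/layout"
)

func main() {
	// Address an image by its "image.ref.name" annotation (pre-existing form).
	byName, err := layout.NewReference("/tmp/layout", "latest") // illustrative dir and tag
	if err != nil {
		panic(err) // NewReference resolves the directory, so it must exist
	}
	fmt.Println(byName.StringWithinTransport()) // e.g. "/tmp/layout:latest"

	// Address an image by its position in index.json (new form).
	byIndex, err := layout.NewIndexReference("/tmp/layout", 1)
	if err != nil {
		panic(err)
	}
	fmt.Println(byIndex.StringWithinTransport()) // e.g. "/tmp/layout:@1"

	// ParseReference accepts the string spellings of both forms.
	if _, err := layout.ParseReference("/tmp/layout:@1"); err != nil {
		panic(err)
	}
}
```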
|
||||
|
||||
func (ref ociReference) Transport() types.ImageTransport {
|
||||
@ -103,7 +131,10 @@ func (ref ociReference) Transport() types.ImageTransport {
|
||||
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
|
||||
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
|
||||
func (ref ociReference) StringWithinTransport() string {
|
||||
return fmt.Sprintf("%s:%s", ref.dir, ref.image)
|
||||
if ref.sourceIndex == -1 {
|
||||
return fmt.Sprintf("%s:%s", ref.dir, ref.image)
|
||||
}
|
||||
return fmt.Sprintf("%s:@%d", ref.dir, ref.sourceIndex)
|
||||
}
|
||||
|
||||
// DockerReference returns a Docker reference associated with this reference
|
||||
@ -187,14 +218,18 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, erro
|
||||
return imgspecv1.Descriptor{}, -1, err
|
||||
}
|
||||
|
||||
if ref.image == "" {
|
||||
// return manifest if only one image is in the oci directory
|
||||
if len(index.Manifests) != 1 {
|
||||
// ask user to choose image when more than one image in the oci directory
|
||||
return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage
|
||||
switch {
|
||||
case ref.image != "" && ref.sourceIndex != -1: // Coverage: newReference refuses to create such references.
|
||||
return imgspecv1.Descriptor{}, -1, fmt.Errorf("Internal error: Cannot have both ref %s and source index @%d",
|
||||
ref.image, ref.sourceIndex)
|
||||
|
||||
case ref.sourceIndex != -1:
|
||||
if ref.sourceIndex >= len(index.Manifests) {
|
||||
return imgspecv1.Descriptor{}, -1, fmt.Errorf("index %d is too large, only %d entries available", ref.sourceIndex, len(index.Manifests))
|
||||
}
|
||||
return index.Manifests[0], 0, nil
|
||||
} else {
|
||||
return index.Manifests[ref.sourceIndex], ref.sourceIndex, nil
|
||||
|
||||
case ref.image != "":
|
||||
// if image specified, look through all manifests for a match
|
||||
var unsupportedMIMETypes []string
|
||||
for i, md := range index.Manifests {
|
||||
@ -208,8 +243,16 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, erro
|
||||
if len(unsupportedMIMETypes) != 0 {
|
||||
return imgspecv1.Descriptor{}, -1, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes)
|
||||
}
|
||||
return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref}
|
||||
|
||||
default:
|
||||
// return manifest if only one image is in the oci directory
|
||||
if len(index.Manifests) != 1 {
|
||||
// ask user to choose image when more than one image in the oci directory
|
||||
return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage
|
||||
}
|
||||
return index.Manifests[0], 0, nil
|
||||
}
|
||||
return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref}
|
||||
}
|
||||
|
||||
// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name
52 vendor/github.com/containers/image/v5/oci/layout/reader.go (generated, vendored, new file)
@ -0,0 +1,52 @@
|
||||
package layout
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containers/image/v5/types"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// This file is named reader.go for consistency with other transports’
|
||||
// handling of “image containers”, but we don’t actually need a stateful reader object.
|
||||
|
||||
// ListResult wraps the image reference and the manifest for loading
|
||||
type ListResult struct {
|
||||
Reference types.ImageReference
|
||||
ManifestDescriptor imgspecv1.Descriptor
|
||||
}
|
||||
|
||||
// List returns a slice of manifests included in the archive
|
||||
func List(dir string) ([]ListResult, error) {
|
||||
var res []ListResult
|
||||
|
||||
indexJSON, err := os.ReadFile(filepath.Join(dir, imgspecv1.ImageIndexFile))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var index imgspecv1.Index
|
||||
if err := json.Unmarshal(indexJSON, &index); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for manifestIndex, md := range index.Manifests {
|
||||
refName := md.Annotations[imgspecv1.AnnotationRefName]
|
||||
index := -1
|
||||
if refName == "" {
|
||||
index = manifestIndex
|
||||
}
|
||||
ref, err := newReference(dir, refName, index)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating image reference: %w", err)
|
||||
}
|
||||
reference := ListResult{
|
||||
Reference: ref,
|
||||
ManifestDescriptor: md,
|
||||
}
|
||||
res = append(res, reference)
|
||||
}
|
||||
return res, nil
|
||||
}
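A hedged usage sketch for the new List helper: enumerate every manifest in a layout directory and show how each entry would be addressed (by name when the ref-name annotation is present, otherwise by the @index form). The directory path is illustrative and the layout is assumed to exist:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/oci/layout"
)

func main() {
	results, err := layout.List("/tmp/layout") // illustrative path to an existing OCI layout
	if err != nil {
		panic(err)
	}
	for _, r := range results {
		// r.Reference was built by List: named entries keep their annotation,
		// unnamed entries fall back to the @index form.
		fmt.Printf("%-30s %s %s\n",
			r.Reference.StringWithinTransport(),
			r.ManifestDescriptor.Digest,
			r.ManifestDescriptor.MediaType)
	}
}
```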
9 vendor/github.com/containers/image/v5/openshift/openshift_dest.go (generated, vendored)
@ -22,6 +22,7 @@ import (
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
type openshiftImageDestination struct {
|
||||
@ -111,6 +112,14 @@ func (d *openshiftImageDestination) SupportsPutBlobPartial() bool {
|
||||
return d.docker.SupportsPutBlobPartial()
|
||||
}
|
||||
|
||||
// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
|
||||
// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
|
||||
// The destination can use it in its TryReusingBlob/PutBlob implementations
|
||||
// (otherwise it only obtains the final config after all layers are written).
|
||||
func (d *openshiftImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
|
||||
return d.docker.NoteOriginalOCIConfig(ociConfig, configErr)
|
||||
}
|
||||
|
||||
// PutBlobWithOptions writes contents of stream and returns data representing the result.
|
||||
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
|
||||
// inputInfo.Size is the expected length of stream, if known.
|
||||
6 vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go (generated, vendored)
@ -1,6 +1,7 @@
|
||||
package sysregistriesv2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
@ -744,6 +745,11 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedC
|
||||
// Enforce v2 format for drop-in-configs.
|
||||
dropIn, err := loadConfigFile(path, true)
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// file must have been removed between the directory listing
|
||||
// and the open call, ignore that as it is an expected race
|
||||
continue
|
||||
}
|
||||
return nil, fmt.Errorf("loading drop-in registries configuration %q: %w", path, err)
|
||||
}
|
||||
config.updateWithConfigurationFrom(dropIn)
|
||||
10 vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go (generated, vendored)
@ -3,6 +3,7 @@ package tlsclientconfig
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
@ -36,12 +37,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
|
||||
logrus.Debugf(" crt: %s", fullPath)
|
||||
data, err := os.ReadFile(fullPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
// Dangling symbolic link?
|
||||
// Race with someone who deleted the
|
||||
// file after we read the directory's
|
||||
// list of contents?
|
||||
logrus.Warnf("error reading certificate %q: %v", fullPath, err)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
// file must have been removed between the directory listing
|
||||
// and the open call, ignore that as it is an expected race
|
||||
continue
|
||||
}
|
||||
return err
|
||||
28 vendor/github.com/containers/image/v5/signature/fulcio_cert.go (generated, vendored)
@ -108,19 +108,10 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time,
|
||||
}
|
||||
}
|
||||
|
||||
untrustedLeafCerts, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedCertificateBytes)
|
||||
untrustedCertificate, err := parseLeafCertFromPEM(untrustedCertificateBytes)
|
||||
if err != nil {
|
||||
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("parsing leaf certificate: %v", err))
|
||||
return nil, err
|
||||
}
|
||||
switch len(untrustedLeafCerts) {
|
||||
case 0:
|
||||
return nil, internal.NewInvalidSignatureError("no certificate found in signature certificate data")
|
||||
case 1:
|
||||
break // OK
|
||||
default:
|
||||
return nil, internal.NewInvalidSignatureError("unexpected multiple certificates present in signature certificate data")
|
||||
}
|
||||
untrustedCertificate := untrustedLeafCerts[0]
|
||||
|
||||
// Go rejects Subject Alternative Name that has no DNSNames, EmailAddresses, IPAddresses and URIs;
|
||||
// we match SAN ourselves, so override that.
|
||||
@ -195,6 +186,21 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time,
|
||||
return untrustedCertificate.PublicKey, nil
|
||||
}
|
||||
|
||||
func parseLeafCertFromPEM(untrustedCertificateBytes []byte) (*x509.Certificate, error) {
|
||||
untrustedLeafCerts, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedCertificateBytes)
|
||||
if err != nil {
|
||||
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("parsing leaf certificate: %v", err))
|
||||
}
|
||||
switch len(untrustedLeafCerts) {
|
||||
case 0:
|
||||
return nil, internal.NewInvalidSignatureError("no certificate found in signature certificate data")
|
||||
case 1: // OK
|
||||
return untrustedLeafCerts[0], nil
|
||||
default:
|
||||
return nil, internal.NewInvalidSignatureError("unexpected multiple certificates present in signature certificate data")
|
||||
}
|
||||
}
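The refactor above extracts the leaf-certificate handling so that the Fulcio path and the new PKI path can share it: cryptoutils.UnmarshalCertificatesFromPEM returns a slice, and the caller must insist on exactly one certificate. A minimal hedged sketch of that same check as it might look outside this package; the function name, error texts and path are illustrative:

```go
package main

import (
	"crypto/x509"
	"fmt"
	"os"

	"github.com/sigstore/sigstore/pkg/cryptoutils"
)

// leafCertFromPEM is an illustrative stand-in for the unexported helper above:
// it requires the PEM data to contain exactly one certificate.
func leafCertFromPEM(pemBytes []byte) (*x509.Certificate, error) {
	certs, err := cryptoutils.UnmarshalCertificatesFromPEM(pemBytes)
	if err != nil {
		return nil, fmt.Errorf("parsing leaf certificate: %w", err)
	}
	if len(certs) != 1 {
		return nil, fmt.Errorf("expected exactly one certificate, found %d", len(certs))
	}
	return certs[0], nil
}

func main() {
	pemBytes, err := os.ReadFile("/tmp/leaf.pem") // illustrative path
	if err != nil {
		panic(err)
	}
	cert, err := leafCertFromPEM(pemBytes)
	if err != nil {
		panic(err)
	}
	fmt.Println("leaf subject:", cert.Subject)
}
```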
|
||||
|
||||
func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
|
||||
untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string,
|
||||
untrustedPayloadBytes []byte) (crypto.PublicKey, error) {
|
||||
2 vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go (generated, vendored)
@ -20,7 +20,7 @@ func (f *fulcioTrustRoot) validate() error {
|
||||
return errors.New("fulcio disabled at compile-time")
|
||||
}
|
||||
|
||||
func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
|
||||
func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
|
||||
untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string,
|
||||
untrustedPayloadBytes []byte) (crypto.PublicKey, error) {
|
||||
return nil, errors.New("fulcio disabled at compile-time")
|
||||
9 vendor/github.com/containers/image/v5/signature/internal/errors.go (generated, vendored)
@ -13,3 +13,12 @@ func (err InvalidSignatureError) Error() string {
|
||||
func NewInvalidSignatureError(msg string) InvalidSignatureError {
|
||||
return InvalidSignatureError{msg: msg}
|
||||
}
|
||||
|
||||
// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError.
|
||||
// All other errors are returned as is.
|
||||
func JSONFormatToInvalidSignatureError(err error) error {
|
||||
if formatErr, ok := err.(JSONFormatError); ok {
|
||||
err = NewInvalidSignatureError(formatErr.Error())
|
||||
}
|
||||
return err
|
||||
}
|
||||
9 vendor/github.com/containers/image/v5/signature/internal/rekor_set.go (generated, vendored)
@ -40,15 +40,6 @@ type UntrustedRekorPayload struct {
|
||||
// A compile-time check that UntrustedRekorSET implements json.Unmarshaler
|
||||
var _ json.Unmarshaler = (*UntrustedRekorSET)(nil)
|
||||
|
||||
// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError.
|
||||
// All other errors are returned as is.
|
||||
func JSONFormatToInvalidSignatureError(err error) error {
|
||||
if formatErr, ok := err.(JSONFormatError); ok {
|
||||
err = NewInvalidSignatureError(formatErr.Error())
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface
|
||||
func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
|
||||
return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
|
||||
2 vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go (generated, vendored)
@ -10,6 +10,6 @@ import (
|
||||
|
||||
// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data.
|
||||
// Returns bundle upload time on success.
|
||||
func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
|
||||
func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
|
||||
return time.Time{}, NewInvalidSignatureError("rekor disabled at compile-time")
|
||||
}
|
||||
74 vendor/github.com/containers/image/v5/signature/pki_cert.go (generated, vendored, new file)
@ -0,0 +1,74 @@
|
||||
package signature
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
"github.com/containers/image/v5/signature/internal"
|
||||
"github.com/sigstore/sigstore/pkg/cryptoutils"
|
||||
)
|
||||
|
||||
type pkiTrustRoot struct {
|
||||
caRootsCertificates *x509.CertPool
|
||||
caIntermediateCertificates *x509.CertPool
|
||||
subjectEmail string
|
||||
subjectHostname string
|
||||
}
|
||||
|
||||
func (p *pkiTrustRoot) validate() error {
|
||||
if p.subjectEmail == "" && p.subjectHostname == "" {
|
||||
return errors.New("Internal inconsistency: PKI use set up without subject email or subject hostname")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func verifyPKI(pkiTrustRoot *pkiTrustRoot, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte) (crypto.PublicKey, error) {
|
||||
var untrustedIntermediatePool *x509.CertPool
|
||||
if pkiTrustRoot.caIntermediateCertificates != nil {
|
||||
untrustedIntermediatePool = pkiTrustRoot.caIntermediateCertificates.Clone()
|
||||
} else {
|
||||
untrustedIntermediatePool = x509.NewCertPool()
|
||||
}
|
||||
if len(untrustedIntermediateChainBytes) > 0 {
|
||||
untrustedIntermediateChain, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedIntermediateChainBytes)
|
||||
if err != nil {
|
||||
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("loading certificate chain: %v", err))
|
||||
}
|
||||
if len(untrustedIntermediateChain) > 1 {
|
||||
for _, untrustedIntermediateCert := range untrustedIntermediateChain[:len(untrustedIntermediateChain)-1] {
|
||||
untrustedIntermediatePool.AddCert(untrustedIntermediateCert)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
untrustedCertificate, err := parseLeafCertFromPEM(untrustedCertificateBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, err := untrustedCertificate.Verify(x509.VerifyOptions{
|
||||
Intermediates: untrustedIntermediatePool,
|
||||
Roots: pkiTrustRoot.caRootsCertificates,
|
||||
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
|
||||
}); err != nil {
|
||||
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("verifying leaf certificate failed: %v", err))
|
||||
}
|
||||
|
||||
if pkiTrustRoot.subjectEmail != "" {
|
||||
if !slices.Contains(untrustedCertificate.EmailAddresses, pkiTrustRoot.subjectEmail) {
|
||||
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %q not found (got %q)",
|
||||
pkiTrustRoot.subjectEmail,
|
||||
untrustedCertificate.EmailAddresses))
|
||||
}
|
||||
}
|
||||
if pkiTrustRoot.subjectHostname != "" {
|
||||
if err = untrustedCertificate.VerifyHostname(pkiTrustRoot.subjectHostname); err != nil {
|
||||
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected subject hostname: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
return untrustedCertificate.PublicKey, nil
|
||||
}
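verifyPKI above is, at its core, standard crypto/x509 chain building (trusted roots plus optional intermediates, restricted to the code-signing key usage) followed by subject checks. A stripped-down, hedged sketch of the same shape using only the standard library; the file paths and hostname are illustrative:

```go
package main

import (
	"crypto/x509"
	"fmt"
	"os"
)

func main() {
	rootsPEM, err := os.ReadFile("/etc/containers/ca-roots.pem") // illustrative path
	if err != nil {
		panic(err)
	}
	roots := x509.NewCertPool()
	if !roots.AppendCertsFromPEM(rootsPEM) {
		panic("no usable root certificates")
	}

	leafDER, err := os.ReadFile("/tmp/leaf.der") // illustrative path, DER-encoded certificate
	if err != nil {
		panic(err)
	}
	leaf, err := x509.ParseCertificate(leafDER)
	if err != nil {
		panic(err)
	}

	// Chain building, same options shape as verifyPKI: trusted roots,
	// (optionally) an intermediate pool, and the code-signing key usage.
	if _, err := leaf.Verify(x509.VerifyOptions{
		Roots:     roots,
		KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
	}); err != nil {
		panic(fmt.Errorf("verifying leaf certificate: %w", err))
	}

	// Subject checks corresponding to subjectEmail / subjectHostname in the policy.
	if err := leaf.VerifyHostname("registry.example.com"); err != nil { // illustrative hostname
		panic(err)
	}
	fmt.Println("certificate accepted")
}
```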
192 vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go (generated, vendored)
@ -71,6 +71,17 @@ func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedO
|
||||
}
|
||||
}
|
||||
|
||||
// PRSigstoreSignedWithPKI specifies a value for the "pki" field when calling NewPRSigstoreSigned.
|
||||
func PRSigstoreSignedWithPKI(p PRSigstoreSignedPKI) PRSigstoreSignedOption {
|
||||
return func(pr *prSigstoreSigned) error {
|
||||
if pr.PKI != nil {
|
||||
return InvalidPolicyFormatError(`"pki" already specified`)
|
||||
}
|
||||
pr.PKI = p
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// PRSigstoreSignedWithRekorPublicKeyPath specifies a value for the "rekorPublicKeyPath" field when calling NewPRSigstoreSigned.
|
||||
func PRSigstoreSignedWithRekorPublicKeyPath(rekorPublicKeyPath string) PRSigstoreSignedOption {
|
||||
return func(pr *prSigstoreSigned) error {
|
||||
@ -159,8 +170,11 @@ func newPRSigstoreSigned(options ...PRSigstoreSignedOption) (*prSigstoreSigned,
|
||||
if res.Fulcio != nil {
|
||||
keySources++
|
||||
}
|
||||
if res.PKI != nil {
|
||||
keySources++
|
||||
}
|
||||
if keySources != 1 {
|
||||
return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths, keyData, keyDatas and fulcio must be specified")
|
||||
return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths, keyData, keyDatas, fulcio, and pki must be specified")
|
||||
}
|
||||
|
||||
rekorSources := 0
|
||||
@ -182,6 +196,9 @@ func newPRSigstoreSigned(options ...PRSigstoreSignedOption) (*prSigstoreSigned,
|
||||
if res.Fulcio != nil && rekorSources == 0 {
|
||||
return nil, InvalidPolicyFormatError("At least one of rekorPublickeyPath, rekorPublicKeyPaths, rekorPublickeyData and rekorPublicKeyDatas must be specified if fulcio is used")
|
||||
}
|
||||
if res.PKI != nil && rekorSources > 0 {
|
||||
return nil, InvalidPolicyFormatError("rekorPublickeyPath, rekorPublicKeyPaths, rekorPublickeyData and rekorPublicKeyDatas are not supported for pki")
|
||||
}
|
||||
|
||||
if res.SignedIdentity == nil {
|
||||
return nil, InvalidPolicyFormatError("signedIdentity not specified")
|
||||
@ -218,9 +235,10 @@ var _ json.Unmarshaler = (*prSigstoreSigned)(nil)
|
||||
func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
|
||||
*pr = prSigstoreSigned{}
|
||||
var tmp prSigstoreSigned
|
||||
var gotKeyPath, gotKeyPaths, gotKeyData, gotKeyDatas, gotFulcio bool
|
||||
var gotKeyPath, gotKeyPaths, gotKeyData, gotKeyDatas, gotFulcio, gotPKI bool
|
||||
var gotRekorPublicKeyPath, gotRekorPublicKeyPaths, gotRekorPublicKeyData, gotRekorPublicKeyDatas bool
|
||||
var fulcio prSigstoreSignedFulcio
|
||||
var pki prSigstoreSignedPKI
|
||||
var signedIdentity json.RawMessage
|
||||
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
|
||||
switch key {
|
||||
@ -253,6 +271,9 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
|
||||
case "rekorPublicKeyDatas":
|
||||
gotRekorPublicKeyDatas = true
|
||||
return &tmp.RekorPublicKeyDatas
|
||||
case "pki":
|
||||
gotPKI = true
|
||||
return &pki
|
||||
case "signedIdentity":
|
||||
return &signedIdentity
|
||||
default:
|
||||
@ -303,6 +324,9 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
|
||||
if gotRekorPublicKeyDatas {
|
||||
opts = append(opts, PRSigstoreSignedWithRekorPublicKeyDatas(tmp.RekorPublicKeyDatas))
|
||||
}
|
||||
if gotPKI {
|
||||
opts = append(opts, PRSigstoreSignedWithPKI(&pki))
|
||||
}
|
||||
opts = append(opts, PRSigstoreSignedWithSignedIdentity(tmp.SignedIdentity))
|
||||
|
||||
res, err := newPRSigstoreSigned(opts...)
|
||||
@ -440,3 +464,167 @@ func (f *prSigstoreSignedFulcio) UnmarshalJSON(data []byte) error {
|
||||
*f = *res
|
||||
return nil
|
||||
}
|
||||
|
||||
// PRSigstoreSignedPKIOption is a way to pass values to NewPRSigstoreSignedPKI
|
||||
type PRSigstoreSignedPKIOption func(*prSigstoreSignedPKI) error
|
||||
|
||||
// PRSigstoreSignedPKIWithCARootsPath specifies a value for the "caRootsPath" field when calling NewPRSigstoreSignedPKI
|
||||
func PRSigstoreSignedPKIWithCARootsPath(caRootsPath string) PRSigstoreSignedPKIOption {
|
||||
return func(p *prSigstoreSignedPKI) error {
|
||||
if p.CARootsPath != "" {
|
||||
return InvalidPolicyFormatError(`"caRootsPath" already specified`)
|
||||
}
|
||||
p.CARootsPath = caRootsPath
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// PRSigstoreSignedPKIWithCARootsData specifies a value for the "caRootsData" field when calling NewPRSigstoreSignedPKI
|
||||
func PRSigstoreSignedPKIWithCARootsData(caRootsData []byte) PRSigstoreSignedPKIOption {
|
||||
return func(p *prSigstoreSignedPKI) error {
|
||||
if p.CARootsData != nil {
|
||||
return InvalidPolicyFormatError(`"caRootsData" already specified`)
|
||||
}
|
||||
p.CARootsData = caRootsData
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// PRSigstoreSignedPKIWithCAIntermediatesPath specifies a value for the "caIntermediatesPath" field when calling NewPRSigstoreSignedPKI
|
||||
func PRSigstoreSignedPKIWithCAIntermediatesPath(caIntermediatesPath string) PRSigstoreSignedPKIOption {
|
||||
return func(p *prSigstoreSignedPKI) error {
|
||||
if p.CAIntermediatesPath != "" {
|
||||
return InvalidPolicyFormatError(`"caIntermediatesPath" already specified`)
|
||||
}
|
||||
p.CAIntermediatesPath = caIntermediatesPath
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// PRSigstoreSignedPKIWithCAIntermediatesData specifies a value for the "caIntermediatesData" field when calling NewPRSigstoreSignedPKI
|
||||
func PRSigstoreSignedPKIWithCAIntermediatesData(caIntermediatesData []byte) PRSigstoreSignedPKIOption {
|
||||
return func(p *prSigstoreSignedPKI) error {
|
||||
if p.CAIntermediatesData != nil {
|
||||
return InvalidPolicyFormatError(`"caIntermediatesData" already specified`)
|
||||
}
|
||||
p.CAIntermediatesData = caIntermediatesData
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// PRSigstoreSignedPKIWithSubjectEmail specifies a value for the "subjectEmail" field when calling NewPRSigstoreSignedPKI
|
||||
func PRSigstoreSignedPKIWithSubjectEmail(subjectEmail string) PRSigstoreSignedPKIOption {
|
||||
return func(p *prSigstoreSignedPKI) error {
|
||||
if p.SubjectEmail != "" {
|
||||
return InvalidPolicyFormatError(`"subjectEmail" already specified`)
|
||||
}
|
||||
p.SubjectEmail = subjectEmail
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// PRSigstoreSignedPKIWithSubjectHostname specifies a value for the "subjectHostname" field when calling NewPRSigstoreSignedPKI
|
||||
func PRSigstoreSignedPKIWithSubjectHostname(subjectHostname string) PRSigstoreSignedPKIOption {
|
||||
return func(p *prSigstoreSignedPKI) error {
|
||||
if p.SubjectHostname != "" {
|
||||
return InvalidPolicyFormatError(`"subjectHostname" already specified`)
|
||||
}
|
||||
p.SubjectHostname = subjectHostname
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// newPRSigstoreSignedPKI is NewPRSigstoreSignedPKI, except it returns the private type
|
||||
func newPRSigstoreSignedPKI(options ...PRSigstoreSignedPKIOption) (*prSigstoreSignedPKI, error) {
|
||||
res := prSigstoreSignedPKI{}
|
||||
for _, o := range options {
|
||||
if err := o(&res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if res.CARootsPath != "" && res.CARootsData != nil {
|
||||
return nil, InvalidPolicyFormatError("caRootsPath and caRootsData cannot be used simultaneously")
|
||||
}
|
||||
if res.CARootsPath == "" && res.CARootsData == nil {
|
||||
return nil, InvalidPolicyFormatError("At least one of caRootsPath and caRootsData must be specified")
|
||||
}
|
||||
|
||||
if res.CAIntermediatesPath != "" && res.CAIntermediatesData != nil {
|
||||
return nil, InvalidPolicyFormatError("caIntermediatesPath and caIntermediatesData cannot be used simultaneously")
|
||||
}
|
||||
|
||||
if res.SubjectEmail == "" && res.SubjectHostname == "" {
|
||||
return nil, InvalidPolicyFormatError("At least one of subjectEmail, subjectHostname must be specified")
|
||||
}
|
||||
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
// NewPRSigstoreSignedPKI returns a PRSigstoreSignedPKI based on options.
|
||||
func NewPRSigstoreSignedPKI(options ...PRSigstoreSignedPKIOption) (PRSigstoreSignedPKI, error) {
|
||||
return newPRSigstoreSignedPKI(options...)
|
||||
}
|
||||
|
||||
// Compile-time check that prSigstoreSignedPKI implements json.Unmarshaler.
|
||||
var _ json.Unmarshaler = (*prSigstoreSignedPKI)(nil)
|
||||
|
||||
func (p *prSigstoreSignedPKI) UnmarshalJSON(data []byte) error {
|
||||
*p = prSigstoreSignedPKI{}
|
||||
var tmp prSigstoreSignedPKI
|
||||
var gotCARootsPath, gotCARootsData, gotCAIntermediatesPath, gotCAIntermediatesData, gotSubjectEmail, gotSubjectHostname bool
|
||||
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
|
||||
switch key {
|
||||
case "caRootsPath":
|
||||
gotCARootsPath = true
|
||||
return &tmp.CARootsPath
|
||||
case "caRootsData":
|
||||
gotCARootsData = true
|
||||
return &tmp.CARootsData
|
||||
case "caIntermediatesPath":
|
||||
gotCAIntermediatesPath = true
|
||||
return &tmp.CAIntermediatesPath
|
||||
case "caIntermediatesData":
|
||||
gotCAIntermediatesData = true
|
||||
return &tmp.CAIntermediatesData
|
||||
case "subjectEmail":
|
||||
gotSubjectEmail = true
|
||||
return &tmp.SubjectEmail
|
||||
case "subjectHostname":
|
||||
gotSubjectHostname = true
|
||||
return &tmp.SubjectHostname
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var opts []PRSigstoreSignedPKIOption
|
||||
if gotCARootsPath {
|
||||
opts = append(opts, PRSigstoreSignedPKIWithCARootsPath(tmp.CARootsPath))
|
||||
}
|
||||
if gotCARootsData {
|
||||
opts = append(opts, PRSigstoreSignedPKIWithCARootsData(tmp.CARootsData))
|
||||
}
|
||||
if gotCAIntermediatesPath {
|
||||
opts = append(opts, PRSigstoreSignedPKIWithCAIntermediatesPath(tmp.CAIntermediatesPath))
|
||||
}
|
||||
if gotCAIntermediatesData {
|
||||
opts = append(opts, PRSigstoreSignedPKIWithCAIntermediatesData(tmp.CAIntermediatesData))
|
||||
}
|
||||
if gotSubjectEmail {
|
||||
opts = append(opts, PRSigstoreSignedPKIWithSubjectEmail(tmp.SubjectEmail))
|
||||
}
|
||||
if gotSubjectHostname {
|
||||
opts = append(opts, PRSigstoreSignedPKIWithSubjectHostname(tmp.SubjectHostname))
|
||||
}
|
||||
|
||||
res, err := newPRSigstoreSignedPKI(opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*p = *res
|
||||
return nil
|
||||
}
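A hedged sketch of building the new PKI-backed sigstoreSigned requirement programmatically with the options defined above. NewPRMatchRepoDigestOrExact is the package's existing signed-identity constructor; the CA path and e-mail are illustrative values:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/signature"
)

func main() {
	pki, err := signature.NewPRSigstoreSignedPKI(
		signature.PRSigstoreSignedPKIWithCARootsPath("/etc/containers/ca-roots.pem"), // illustrative
		signature.PRSigstoreSignedPKIWithSubjectEmail("release@example.com"),         // illustrative
	)
	if err != nil {
		panic(err)
	}

	req, err := signature.NewPRSigstoreSigned(
		signature.PRSigstoreSignedWithPKI(pki),
		signature.PRSigstoreSignedWithSignedIdentity(signature.NewPRMatchRepoDigestOrExact()),
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("built requirement: %#v\n", req)
}
```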
99 vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go (generated, vendored)
@ -97,11 +97,64 @@ func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) {
|
||||
return &fulcio, nil
|
||||
}
|
||||
|
||||
// prepareTrustRoot creates a pkiTrustRoot from the input data.
|
||||
// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedPKI is the only one.)
|
||||
func (p *prSigstoreSignedPKI) prepareTrustRoot() (*pkiTrustRoot, error) {
|
||||
caRootsCertPEMs, err := loadBytesFromConfigSources(configBytesSources{
|
||||
inconsistencyErrorMessage: `Internal inconsistency: both "caRootsPath" and "caRootsData" specified`,
|
||||
path: p.CARootsPath,
|
||||
data: p.CARootsData,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(caRootsCertPEMs) != 1 {
|
||||
return nil, errors.New(`Internal inconsistency: PKI specified with not exactly one of "caRootsPath" nor "caRootsData"`)
|
||||
}
|
||||
rootsCerts := x509.NewCertPool()
|
||||
if ok := rootsCerts.AppendCertsFromPEM(caRootsCertPEMs[0]); !ok {
|
||||
return nil, errors.New("error loading PKI CA Roots certificates")
|
||||
}
|
||||
pki := pkiTrustRoot{
|
||||
caRootsCertificates: rootsCerts,
|
||||
subjectEmail: p.SubjectEmail,
|
||||
subjectHostname: p.SubjectHostname,
|
||||
}
|
||||
caIntermediateCertPEMs, err := loadBytesFromConfigSources(configBytesSources{
|
||||
inconsistencyErrorMessage: `Internal inconsistency: both "caIntermediatesPath" and "caIntermediatesData" specified`,
|
||||
path: p.CAIntermediatesPath,
|
||||
data: p.CAIntermediatesData,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if caIntermediateCertPEMs != nil {
|
||||
if len(caIntermediateCertPEMs) != 1 {
|
||||
return nil, errors.New(`Internal inconsistency: PKI specified with invalid value from "caIntermediatesPath" or "caIntermediatesData"`)
|
||||
}
|
||||
intermediatePool := x509.NewCertPool()
|
||||
trustedIntermediates, err := cryptoutils.UnmarshalCertificatesFromPEM(caIntermediateCertPEMs[0])
|
||||
if err != nil {
|
||||
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("loading trusted intermediate certificates: %v", err))
|
||||
}
|
||||
for _, trustedIntermediateCert := range trustedIntermediates {
|
||||
intermediatePool.AddCert(trustedIntermediateCert)
|
||||
}
|
||||
pki.caIntermediateCertificates = intermediatePool
|
||||
}
|
||||
|
||||
if err := pki.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pki, nil
|
||||
}
|
||||
|
||||
// sigstoreSignedTrustRoot contains an already parsed version of the prSigstoreSigned policy
|
||||
type sigstoreSignedTrustRoot struct {
|
||||
publicKeys []crypto.PublicKey
|
||||
fulcio *fulcioTrustRoot
|
||||
rekorPublicKeys []*ecdsa.PublicKey
|
||||
pki *pkiTrustRoot
|
||||
}
|
||||
|
||||
func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error) {
|
||||
@ -166,6 +219,14 @@ func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error)
|
||||
}
|
||||
}
|
||||
|
||||
if pr.PKI != nil {
|
||||
p, err := pr.PKI.prepareTrustRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res.pki = p
|
||||
}
|
||||
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
@ -189,13 +250,23 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva
|
||||
}
|
||||
untrustedPayload := sig.UntrustedPayload()
|
||||
|
||||
keySources := 0
|
||||
if trustRoot.publicKeys != nil {
|
||||
keySources++
|
||||
}
|
||||
if trustRoot.fulcio != nil {
|
||||
keySources++
|
||||
}
|
||||
if trustRoot.pki != nil {
|
||||
keySources++
|
||||
}
|
||||
|
||||
var publicKeys []crypto.PublicKey
|
||||
switch {
|
||||
case trustRoot.publicKeys != nil && trustRoot.fulcio != nil: // newPRSigstoreSigned rejects such combinations.
|
||||
return sarRejected, errors.New("Internal inconsistency: Both a public key and Fulcio CA specified")
|
||||
case trustRoot.publicKeys == nil && trustRoot.fulcio == nil: // newPRSigstoreSigned rejects such combinations.
|
||||
return sarRejected, errors.New("Internal inconsistency: Neither a public key nor a Fulcio CA specified")
|
||||
|
||||
case keySources > 1: // newPRSigstoreSigned rejects more than one key sources.
|
||||
return sarRejected, errors.New("Internal inconsistency: More than one of public key, Fulcio, or PKI specified")
|
||||
case keySources == 0: // newPRSigstoreSigned rejects empty key sources.
|
||||
return sarRejected, errors.New("Internal inconsistency: A public key, Fulcio, or PKI must be specified.")
|
||||
case trustRoot.publicKeys != nil:
|
||||
if trustRoot.rekorPublicKeys != nil {
|
||||
untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey]
|
||||
@ -254,6 +325,24 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva
|
||||
return sarRejected, err
|
||||
}
|
||||
publicKeys = []crypto.PublicKey{pk}
|
||||
|
||||
case trustRoot.pki != nil:
|
||||
if trustRoot.rekorPublicKeys != nil { // newPRSigstoreSigned rejects such combinations.
|
||||
return sarRejected, errors.New("Internal inconsistency: PKI specified with a Rekor public key")
|
||||
}
|
||||
untrustedCert, ok := untrustedAnnotations[signature.SigstoreCertificateAnnotationKey]
|
||||
if !ok {
|
||||
return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreCertificateAnnotationKey)
|
||||
}
|
||||
var untrustedIntermediateChainBytes []byte
|
||||
if untrustedIntermediateChain, ok := untrustedAnnotations[signature.SigstoreIntermediateCertificateChainAnnotationKey]; ok {
|
||||
untrustedIntermediateChainBytes = []byte(untrustedIntermediateChain)
|
||||
}
|
||||
pk, err := verifyPKI(trustRoot.pki, []byte(untrustedCert), untrustedIntermediateChainBytes)
|
||||
if err != nil {
|
||||
return sarRejected, err
|
||||
}
|
||||
publicKeys = []crypto.PublicKey{pk}
|
||||
}
|
||||
|
||||
if len(publicKeys) == 0 {
|
||||
37 vendor/github.com/containers/image/v5/signature/policy_types.go (generated, vendored)
@ -111,16 +111,16 @@ type prSignedBaseLayer struct {
|
||||
type prSigstoreSigned struct {
|
||||
prCommon
|
||||
|
||||
// KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
|
||||
// KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
|
||||
KeyPath string `json:"keyPath,omitempty"`
|
||||
// KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
|
||||
// KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
|
||||
KeyPaths []string `json:"keyPaths,omitempty"`
|
||||
// KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
|
||||
// KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
|
||||
KeyData []byte `json:"keyData,omitempty"`
|
||||
// KeyDatas is a set of trusted keys, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
|
||||
// KeyDatas is a set of trusted keys, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
|
||||
KeyDatas [][]byte `json:"keyDatas,omitempty"`
|
||||
|
||||
// Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
|
||||
// Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
|
||||
// If Fulcio is specified, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well.
|
||||
Fulcio PRSigstoreSignedFulcio `json:"fulcio,omitempty"`
|
||||
|
||||
@ -141,6 +141,9 @@ type prSigstoreSigned struct {
|
||||
// otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
|
||||
RekorPublicKeyDatas [][]byte `json:"rekorPublicKeyDatas,omitempty"`
|
||||
|
||||
// PKI specifies which PKI-generated certificates are accepted. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
|
||||
PKI PRSigstoreSignedPKI `json:"pki,omitempty"`
|
||||
|
||||
// SignedIdentity specifies what image identity the signature must be claiming about the image.
|
||||
// Defaults to "matchRepoDigestOrExact" if not specified.
|
||||
// Note that /usr/bin/cosign interoperability might require using repo-only matching.
|
||||
@ -167,6 +170,30 @@ type prSigstoreSignedFulcio struct {
|
||||
SubjectEmail string `json:"subjectEmail,omitempty"`
|
||||
}
|
||||
|
||||
// PRSigstoreSignedPKI contains PKI configuration options for a "sigstoreSigned" PolicyRequirement.
|
||||
type PRSigstoreSignedPKI interface {
|
||||
// prepareTrustRoot creates a pkiTrustRoot from the input data.
|
||||
// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedPKI is the only one.)
|
||||
prepareTrustRoot() (*pkiTrustRoot, error)
|
||||
}
|
||||
|
||||
// prSigstoreSignedPKI contains non-fulcio certificate PKI configuration options for prSigstoreSigned
|
||||
type prSigstoreSignedPKI struct {
|
||||
// CARootsPath a path to a file containing accepted CA root certificates, in PEM format. Exactly one of CARootsPath and CARootsData must be specified.
|
||||
CARootsPath string `json:"caRootsPath"`
|
||||
// CARootsData contains accepted CA root certificates in PEM format, all of that base64-encoded. Exactly one of CARootsPath and CARootsData must be specified.
|
||||
CARootsData []byte `json:"caRootsData"`
|
||||
// CAIntermediatesPath a path to a file containing accepted CA intermediate certificates, in PEM format. Only one of CAIntermediatesPath or CAIntermediatesData can be specified, not both.
|
||||
CAIntermediatesPath string `json:"caIntermediatesPath"`
|
||||
// CAIntermediatesData contains accepted CA intermediate certificates in PEM format, all of that base64-encoded. Only one of CAIntermediatesPath or CAIntermediatesData can be specified, not both.
|
||||
CAIntermediatesData []byte `json:"caIntermediatesData"`
|
||||
|
||||
// SubjectEmail specifies the expected email address imposed on the subject to which the certificate was issued. At least one of SubjectEmail and SubjectHostname must be specified.
|
||||
SubjectEmail string `json:"subjectEmail"`
|
||||
// SubjectHostname specifies the expected hostname imposed on the subject to which the certificate was issued. At least one of SubjectEmail and SubjectHostname must be specified.
|
||||
SubjectHostname string `json:"subjectHostname"`
|
||||
}
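The same requirement expressed as a policy.json fragment, unmarshaled here through the package's policy parser. A hedged sketch: the registry scope, CA path and e-mail are illustrative, and NewPolicyFromBytes is assumed to be the usual in-memory entry point for loading a policy:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/signature"
)

func main() {
	policyJSON := []byte(`{
    "default": [{"type": "reject"}],
    "transports": {
        "docker": {
            "registry.example.com/releases": [
                {
                    "type": "sigstoreSigned",
                    "pki": {
                        "caRootsPath": "/etc/containers/ca-roots.pem",
                        "subjectEmail": "release@example.com"
                    },
                    "signedIdentity": {"type": "matchRepoDigestOrExact"}
                }
            ]
        }
    }
}`)

	policy, err := signature.NewPolicyFromBytes(policyJSON)
	if err != nil {
		panic(err)
	}
	fmt.Printf("parsed policy with %d transport scope(s)\n", len(policy.Transports))
}
```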
|
||||
|
||||
// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
|
||||
// The type is public, but its implementation is private.
|
||||
580 vendor/github.com/containers/image/v5/storage/storage_dest.go (generated, vendored)
@ -17,11 +17,13 @@ import (
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/internal/image"
|
||||
"github.com/containers/image/v5/internal/imagedestination/impl"
|
||||
"github.com/containers/image/v5/internal/imagedestination/stubs"
|
||||
srcImpl "github.com/containers/image/v5/internal/imagesource/impl"
|
||||
srcStubs "github.com/containers/image/v5/internal/imagesource/stubs"
|
||||
"github.com/containers/image/v5/internal/private"
|
||||
"github.com/containers/image/v5/internal/putblobdigest"
|
||||
"github.com/containers/image/v5/internal/set"
|
||||
"github.com/containers/image/v5/internal/signature"
|
||||
"github.com/containers/image/v5/internal/tmpdir"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
@ -31,6 +33,7 @@ import (
|
||||
graphdriver "github.com/containers/storage/drivers"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/chunked"
|
||||
"github.com/containers/storage/pkg/chunked/toc"
|
||||
"github.com/containers/storage/pkg/ioutils"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
@ -57,8 +60,9 @@ type storageImageDestination struct {
|
||||
imageRef storageReference
|
||||
directory string // Temporary directory where we store blobs until Commit() time
|
||||
nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs
|
||||
manifest []byte // Manifest contents, temporary
|
||||
manifestDigest digest.Digest // Valid if len(manifest) != 0
|
||||
manifest []byte // (Per-instance) manifest contents, or nil if not yet known.
|
||||
manifestMIMEType string // Valid if manifest != nil
|
||||
manifestDigest digest.Digest // Valid if manifest != nil
|
||||
untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs (not even validated to be valid digest.Digest!); or nil if not read yet
|
||||
signatures []byte // Signature contents, temporary
|
||||
signatureses map[digest.Digest][]byte // Instance signature contents, temporary
|
||||
@ -108,8 +112,10 @@ type storageImageDestinationLockProtected struct {
|
||||
//
|
||||
// Ideally we wouldn’t have blobDiffIDs, and we would just keep records by index, but the public API does not require the caller
|
||||
// to provide layer indices; and configs don’t have layer indices. blobDiffIDs needs to exist for those cases.
|
||||
indexToDiffID map[int]digest.Digest // Mapping from layer index to DiffID
|
||||
indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest
|
||||
indexToDiffID map[int]digest.Digest // Mapping from layer index to DiffID
|
||||
// Mapping from layer index to a TOC Digest.
|
||||
// If this is set, then either c/storage/pkg/chunked/toc.GetTOCDigest must have returned a value, or indexToDiffID must be set as well.
|
||||
indexToTOCDigest map[int]digest.Digest
|
||||
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs. CAREFUL: See the WARNING above.
|
||||
|
||||
// Layer data: Before commitLayer is called, either at least one of (diffOutputs, indexToAdditionalLayer, filenames)
|
||||
@ -121,6 +127,9 @@ type storageImageDestinationLockProtected struct {
|
||||
filenames map[digest.Digest]string
|
||||
// Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set.
|
||||
fileSizes map[digest.Digest]int64
|
||||
|
||||
// Config
|
||||
configDigest digest.Digest // "" if N/A or not known yet.
|
||||
}
|
||||
|
||||
// addedLayerInfo records data about a layer to use in this image.
|
||||
@ -201,6 +210,18 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
|
||||
return filepath.Join(s.directory, fmt.Sprintf("%d", s.nextTempFileID.Add(1)))
|
||||
}
|
||||
|
||||
// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
|
||||
// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
|
||||
// The destination can use it in its TryReusingBlob/PutBlob implementations
|
||||
// (otherwise it only obtains the final config after all layers are written).
|
||||
func (s *storageImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
|
||||
if configErr != nil {
|
||||
return fmt.Errorf("writing to c/storage without a valid image config: %w", configErr)
|
||||
}
|
||||
s.setUntrustedDiffIDValuesFromOCIConfig(ociConfig)
|
||||
return nil
|
||||
}
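NoteOriginalOCIConfig gives the destination the image's OCI config up front, so the config's RootFS.DiffIDs array can back the DiffID-commitment checks performed for partially pulled layers later in this file. A hedged sketch of how a per-layer expected DiffID can be read out of an imgspecv1.Image; the helper is illustrative, not this file's internal code:

```go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// expectedDiffID is an illustrative helper: it returns the DiffID the config
// claims for a given layer index, or "" if the config does not cover it.
func expectedDiffID(config *imgspecv1.Image, layerIndex int) digest.Digest {
	if config == nil || layerIndex < 0 || layerIndex >= len(config.RootFS.DiffIDs) {
		return ""
	}
	return config.RootFS.DiffIDs[layerIndex]
}

func main() {
	cfg := &imgspecv1.Image{}
	cfg.RootFS.DiffIDs = []digest.Digest{
		"sha256:0000000000000000000000000000000000000000000000000000000000000000", // placeholder
	}
	fmt.Println(expectedDiffID(cfg, 0)) // the placeholder above
	fmt.Println(expectedDiffID(cfg, 1)) // "" (index not covered by the config)
}
```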
|
||||
|
||||
// PutBlobWithOptions writes contents of stream and returns data representing the result.
|
||||
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
|
||||
// inputInfo.Size is the expected length of stream, if known.
|
||||
@ -214,7 +235,17 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream
|
||||
return info, err
|
||||
}
|
||||
|
||||
if options.IsConfig || options.LayerIndex == nil {
|
||||
if options.IsConfig {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
if s.lockProtected.configDigest != "" {
|
||||
return private.UploadedBlob{}, fmt.Errorf("after config %q, refusing to record another config %q",
|
||||
s.lockProtected.configDigest.String(), info.Digest.String())
|
||||
}
|
||||
s.lockProtected.configDigest = info.Digest
|
||||
return info, nil
|
||||
}
|
||||
if options.LayerIndex == nil {
|
||||
return info, nil
|
||||
}
|
||||
|
||||
@ -315,6 +346,56 @@ func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.Read
|
||||
// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
|
||||
// The fallback _must not_ be done otherwise.
|
||||
func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (_ private.UploadedBlob, retErr error) {
|
||||
inputTOCDigest, err := toc.GetTOCDigest(srcInfo.Annotations)
|
||||
if err != nil {
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
|
||||
// The identity of partially-pulled layers is, as long as we keep compatibility with tar-like consumers,
|
||||
// unfixably ambiguous: there are two possible “views” of the same file (same compressed digest),
|
||||
// the traditional “view” that decompresses the primary stream and consumes a tar file,
|
||||
// and the partial-pull “view” that starts with the TOC.
|
||||
// The two “views” have two separate metadata sets and may refer to different parts of the blob for file contents;
|
||||
// the direct way to ensure they are consistent would be to read the full primary stream (and authenticate it against
|
||||
// the compressed digest), and ensure the metadata and layer contents exactly match the partially-pulled contents -
|
||||
// making the partial pull completely pointless.
|
||||
//
|
||||
// Instead, for partial-pull-capable layers (with inputTOCDigest set), we require the image to “commit”
|
||||
// to uncompressed layer digest values via the config's RootFS.DiffIDs array:
|
||||
// they are already naturally computed for traditionally-pulled layers, and for partially-pulled layers we
|
||||
// do the optimal partial pull, and then reconstruct the uncompressed tar stream just to (expensively) compute this digest.
|
||||
//
|
||||
// Layers which don’t support partial pulls (inputTOCDigest == "", incl. all schema1 layers) can be let through:
|
||||
// the partial pull code will either not engage, or consume the full layer; and the rules of indexToTOCDigest / layerIdentifiedByTOC
|
||||
// ensure the layer is identified by DiffID, i.e. using the traditional “view”.
|
||||
//
|
||||
// But if inputTOCDigest is set and the input image doesn't have RootFS.DiffIDs (the config is invalid for schema2/OCI),
|
||||
// don't allow a partial pull, and fall back to PutBlobWithOptions.
|
||||
//
|
||||
// (The user can opt out of the DiffID commitment checking by a c/storage option, giving up security for performance,
|
||||
// but we will still trigger the fall back here, and we will still enforce a DiffID match, so that the set of accepted images
|
||||
// is the same in both cases, and so that users are not tempted to set the c/storage option to allow accepting some invalid images.)
|
||||
var untrustedDiffID digest.Digest // "" if unknown
|
||||
udid, err := s.untrustedLayerDiffID(options.LayerIndex)
|
||||
if err != nil {
|
||||
var diffIDUnknownErr untrustedLayerDiffIDUnknownError
|
||||
switch {
|
||||
case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
|
||||
// PutBlobPartial is a private API, so all callers are within c/image, and should have called
|
||||
// NoteOriginalOCIConfig first.
|
||||
return private.UploadedBlob{}, fmt.Errorf("internal error: in PutBlobPartial, untrustedLayerDiffID returned errUntrustedLayerDiffIDNotYetAvailable")
|
||||
case errors.As(err, &diffIDUnknownErr):
|
||||
if inputTOCDigest != nil {
|
||||
return private.UploadedBlob{}, private.NewErrFallbackToOrdinaryLayerDownload(err)
|
||||
}
|
||||
untrustedDiffID = "" // A schema1 image or a non-TOC layer with no ambiguity, let it through
|
||||
default:
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
} else {
|
||||
untrustedDiffID = udid
|
||||
}
|
||||
|
||||
fetcher := zstdFetcher{
|
||||
chunkAccessor: chunkAccessor,
|
||||
ctx: ctx,
|
||||
@ -351,35 +432,55 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
|
||||
blobDigest := srcInfo.Digest
|
||||
|
||||
s.lock.Lock()
|
||||
if out.UncompressedDigest != "" {
|
||||
s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest
|
||||
if out.TOCDigest != "" {
|
||||
options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest)
|
||||
}
|
||||
// Don’t set indexToTOCDigest on this path:
|
||||
// - Using UncompressedDigest allows image reuse with non-partially-pulled layers, so we want to set indexToDiffID.
|
||||
// - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch.
|
||||
// That TOC is quite unlikely to match any other TOC value.
|
||||
if err := func() error { // A scope for defer
|
||||
defer s.lock.Unlock()
|
||||
|
||||
// The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is
|
||||
// responsible for ensuring blobDigest has been validated.
|
||||
if out.CompressedDigest != blobDigest {
|
||||
return private.UploadedBlob{}, fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q",
|
||||
out.CompressedDigest, blobDigest)
|
||||
// For true partial pulls, c/storage decides whether to compute the uncompressed digest based on an option in storage.conf
|
||||
// (defaults to true, to avoid ambiguity.)
|
||||
// c/storage can also be configured, to consume a layer not prepared for partial pulls (primarily to allow composefs conversion),
|
||||
// and in that case it always consumes the full blob and always computes the uncompressed digest.
|
||||
if out.UncompressedDigest != "" {
|
||||
// This is centrally enforced later, in commitLayer, but because we have the value available,
|
||||
// we might just as well check immediately.
|
||||
if untrustedDiffID != "" && out.UncompressedDigest != untrustedDiffID {
|
||||
return fmt.Errorf("uncompressed digest of layer %q is %q, config claims %q", srcInfo.Digest.String(),
|
||||
out.UncompressedDigest.String(), untrustedDiffID.String())
|
||||
}
|
||||
|
||||
s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest
|
||||
if out.TOCDigest != "" {
|
||||
s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
|
||||
options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest)
|
||||
}
|
||||
|
||||
// If the whole layer has been consumed, chunked.GetDiffer is responsible for ensuring blobDigest has been validated.
|
||||
if out.CompressedDigest != "" {
|
||||
if out.CompressedDigest != blobDigest {
|
||||
return fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q",
|
||||
out.CompressedDigest, blobDigest)
|
||||
}
|
||||
// So, record also information about blobDigest, that might benefit reuse.
|
||||
// We trust PrepareStagedLayer to validate or create both values correctly.
s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest)
}
} else {
// Sanity-check the defined rules for indexToTOCDigest.
if inputTOCDigest == nil {
return fmt.Errorf("internal error: PrepareStagedLayer returned a TOC-only identity for layer %q with no TOC digest", srcInfo.Digest.String())
}

// Use diffID for layer identity if it is known.
if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" {
s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest
}
s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
}
// So, record also information about blobDigest, that might benefit reuse.
// We trust PrepareStagedLayer to validate or create both values correctly.
s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest)
} else {
// Use diffID for layer identity if it is known.
if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" {
s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest
}
s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
s.lockProtected.diffOutputs[options.LayerIndex] = out
return nil
}(); err != nil {
return private.UploadedBlob{}, err
}
s.lockProtected.diffOutputs[options.LayerIndex] = out
s.lock.Unlock()

succeeded = true
return private.UploadedBlob{
@ -417,22 +518,43 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
if err := blobDigest.Validate(); err != nil {
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
}
if options.TOCDigest != "" {
useTOCDigest := false // If set, (options.TOCDigest != "" && options.LayerIndex != nil) AND we can use options.TOCDigest safely.
if options.TOCDigest != "" && options.LayerIndex != nil {
if err := options.TOCDigest.Validate(); err != nil {
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
}
// Only consider using TOCDigest if we can avoid ambiguous image “views”, see the detailed comment in PutBlobPartial.
_, err := s.untrustedLayerDiffID(*options.LayerIndex)
if err != nil {
var diffIDUnknownErr untrustedLayerDiffIDUnknownError
switch {
case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
// options.TOCDigest is a private API, so all callers are within c/image, and should have called
// NoteOriginalOCIConfig first.
return false, private.ReusedBlob{}, fmt.Errorf("internal error: in TryReusingBlobWithOptions, untrustedLayerDiffID returned errUntrustedLayerDiffIDNotYetAvailable")
case errors.As(err, &diffIDUnknownErr):
logrus.Debugf("Not using TOC %q to look for layer reuse: %v", options.TOCDigest, err)
// But don’t abort entirely, keep useTOCDigest = false, try a blobDigest match.
default:
return false, private.ReusedBlob{}, err
}
} else {
useTOCDigest = true
}
}

// lock the entire method as it executes fairly quickly
s.lock.Lock()
defer s.lock.Unlock()

if options.SrcRef != nil && options.TOCDigest != "" && options.LayerIndex != nil {
if options.SrcRef != nil && useTOCDigest {
// Check if we have the layer in the underlying additional layer store.
aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(options.TOCDigest, options.SrcRef.String())
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobDigest, err)
} else if err == nil {
// Compare the long comment in PutBlobPartial. We assume that the Additional Layer Store will, somehow,
// avoid layer “view” ambiguity.
alsTOCDigest := aLayer.TOCDigest()
if alsTOCDigest != options.TOCDigest {
// FIXME: If alsTOCDigest is "", the Additional Layer Store FUSE server is probably just too old, and we could
@ -505,13 +627,13 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
}
if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest
s.lockProtected.blobDiffIDs[reused.Digest] = uncompressedDigest
return true, reused, nil
}
}
}

if options.TOCDigest != "" && options.LayerIndex != nil {
if useTOCDigest {
// Check if we know which which UncompressedDigest the TOC digest resolves to, and we have a match for that.
// Prefer this over LayersByTOCDigest because we can identify the layer using UncompressedDigest, maximizing reuse.
uncompressedDigest := options.Cache.UncompressedDigestForTOC(options.TOCDigest)
@ -532,6 +654,11 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, options.TOCDigest, err)
}
if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
if uncompressedDigest == "" && layers[0].UncompressedDigest != "" {
// Determine an uncompressed digest if at all possible, to use a traditional image ID
// and to maximize image reuse.
uncompressedDigest = layers[0].UncompressedDigest
}
if uncompressedDigest != "" {
s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest
}
@ -568,13 +695,22 @@ func reusedBlobFromLayerLookup(layers []storage.Layer, blobDigest digest.Digest,

// trustedLayerIdentityData is a _consistent_ set of information known about a single layer.
type trustedLayerIdentityData struct {
layerIdentifiedByTOC bool // true if we decided the layer should be identified by tocDigest, false if by diffID
// true if we decided the layer should be identified by tocDigest, false if by diffID
// This can only be true if c/storage/pkg/chunked/toc.GetTOCDigest returns a value.
layerIdentifiedByTOC bool

diffID digest.Digest // A digest of the uncompressed full contents of the layer, or "" if unknown; must be set if !layerIdentifiedByTOC
tocDigest digest.Digest // A digest of the TOC digest, or "" if unknown; must be set if layerIdentifiedByTOC
blobDigest digest.Digest // A digest of the (possibly-compressed) layer as presented, or "" if unknown/untrusted.
}

// logString() prints a representation of trusted suitable identifying a layer in logs and errors.
// The string is already quoted to expose malicious input and does not need to be quoted again.
// Note that it does not include _all_ of the contents.
func (trusted trustedLayerIdentityData) logString() string {
return fmt.Sprintf("%q/%q/%q", trusted.blobDigest, trusted.tocDigest, trusted.diffID)
}

// trustedLayerIdentityDataLocked returns a _consistent_ set of information for a layer with (layerIndex, blobDigest).
// blobDigest is the (possibly-compressed) layer digest referenced in the manifest.
// It returns (trusted, true) if the layer was found, or (_, false) if insufficient data is available.
@ -785,23 +921,6 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
return nil
}

// singleLayerIDComponent returns a single layer’s the input to computing a layer (chain) ID,
// and an indication whether the input already has the shape of a layer ID.
// It returns ("", false) if the layer is not found at all (which should never happen)
func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDigest digest.Digest) (string, bool) {
s.lock.Lock()
defer s.lock.Unlock()

trusted, ok := s.trustedLayerIdentityDataLocked(layerIndex, blobDigest)
if !ok {
return "", false
}
if trusted.layerIdentifiedByTOC {
return "@TOC=" + trusted.tocDigest.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
}
return trusted.diffID.Encoded(), true // This looks like chain IDs, and it uses the traditional value.
}

// commitLayer commits the specified layer with the given index to the storage.
// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
//
@ -813,16 +932,15 @@ func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDig
// must guarantee that, at any given time, at most one goroutine may execute
// `commitLayer()`.
func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) (bool, error) {
// Already committed? Return early.
if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
return false, nil
}

// Start with an empty string or the previous layer ID. Note that
// `s.indexToStorageID` can only be accessed by *one* goroutine at any
// given time. Hence, we don't need to lock accesses.
var parentLayer string
var parentLayer string // "" if no parent
if index != 0 {
// s.indexToStorageID can only be written by this function, and our caller
// is responsible for ensuring it can be only be called by *one* goroutine at any
// given time. Hence, we don't need to lock accesses.
prev, ok := s.indexToStorageID[index-1]
if !ok {
return false, fmt.Errorf("Internal error: commitLayer called with previous layer %d not committed yet", index-1)
@ -830,18 +948,17 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
parentLayer = prev
}

// Carry over the previous ID for empty non-base layers.
if info.emptyLayer {
s.indexToStorageID[index] = parentLayer
return false, nil
}

// Check if there's already a layer with the ID that we'd give to the result of applying
// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
// The layerID refers either to the DiffID or the digest of the TOC.
layerIDComponent, layerIDComponentStandalone := s.singleLayerIDComponent(index, info.digest)
if layerIDComponent == "" {
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob() / TryReusingBlob() / …
// Collect trusted parameters of the layer.
s.lock.Lock()
trusted, ok := s.trustedLayerIdentityDataLocked(index, info.digest)
s.lock.Unlock()
if !ok {
// Check if the layer exists already and the caller just (incorrectly) forgot to pass it to us in a PutBlob() / TryReusingBlob() / …
//
// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache: a caller
// that relies on using a blob digest that has never been seen by the store had better call
@ -865,23 +982,54 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
return false, fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String())
}

layerIDComponent, layerIDComponentStandalone = s.singleLayerIDComponent(index, info.digest)
if layerIDComponent == "" {
s.lock.Lock()
trusted, ok = s.trustedLayerIdentityDataLocked(index, info.digest)
s.lock.Unlock()
if !ok {
return false, fmt.Errorf("we have blob %q, but don't know its layer ID", info.digest.String())
}
}

id := layerIDComponent
if !layerIDComponentStandalone || parentLayer != "" {
id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Encoded()
// Ensure that we always see the same “view” of a layer, as identified by the layer’s uncompressed digest,
// unless the user has explicitly opted out of this in storage.conf: see the more detailed explanation in PutBlobPartial.
if trusted.diffID != "" {
untrustedDiffID, err := s.untrustedLayerDiffID(index)
if err != nil {
var diffIDUnknownErr untrustedLayerDiffIDUnknownError
switch {
case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
logrus.Debugf("Skipping commit for layer %q, manifest not yet available for DiffID check", index)
return true, nil
case errors.As(err, &diffIDUnknownErr):
// If untrustedLayerDiffIDUnknownError, the input image is schema1, has no TOC annotations,
// so we could not have reused a TOC-identified layer nor have done a TOC-identified partial pull,
// i.e. there is no other “view” to worry about. Sanity-check that we really see the only expected view.
//
// Or, maybe, the input image is OCI, and has invalid/missing DiffID values in config. In that case
// we _must_ fail if we used a TOC-identified layer - but PutBlobPartial should have already
// refused to do a partial pull, so we are in an inconsistent state.
if trusted.layerIdentifiedByTOC {
return false, fmt.Errorf("internal error: layer %d for blob %s was identified by TOC, but we don't have a DiffID in config",
index, trusted.logString())
}
// else a schema1 image or a non-TOC layer with no ambiguity, let it through
default:
return false, err
}
} else if trusted.diffID != untrustedDiffID {
return false, fmt.Errorf("layer %d (blob %s) does not match config's DiffID %q", index, trusted.logString(), untrustedDiffID)
}
}

id := layerID(parentLayer, trusted)

if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
// There's already a layer that should have the right contents, just reuse it.
s.indexToStorageID[index] = layer.ID
return false, nil
}

layer, err := s.createNewLayer(index, info.digest, parentLayer, id)
layer, err := s.createNewLayer(index, trusted, parentLayer, id)
if err != nil {
return false, err
}
@ -892,32 +1040,62 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
return false, nil
}

// createNewLayer creates a new layer newLayerID for (index, layerDigest) on top of parentLayer (which may be "").
// layerID computes a layer (“chain”) ID for (a possibly-empty parentID, trusted)
func layerID(parentID string, trusted trustedLayerIdentityData) string {
var component string
mustHash := false
if trusted.layerIdentifiedByTOC {
// "@" is not a valid start of a digest.Digest.Encoded(), so this is unambiguous with the !layerIdentifiedByTOC case.
// But we _must_ hash this below to get a Digest.Encoded()-formatted value.
component = "@TOC=" + trusted.tocDigest.Encoded()
mustHash = true
} else {
component = trusted.diffID.Encoded() // This looks like chain IDs, and it uses the traditional value.
}

if parentID == "" && !mustHash {
return component
}
return digest.Canonical.FromString(parentID + "+" + component).Encoded()
}
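The ID composition above can be illustrated with a small standalone sketch; exampleLayerID and the input strings below are made-up stand-ins, not part of c/image, and the real code derives the component from trustedLayerIdentityData as shown in layerID() above.

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// exampleLayerID mirrors the shape of layerID() above: a diffID-identified base layer
// keeps the bare encoded diffID; everything else is hashed together with the parent ID.
func exampleLayerID(parentID, component string, mustHash bool) string {
	if parentID == "" && !mustHash {
		return component
	}
	return digest.Canonical.FromString(parentID + "+" + component).Encoded()
}

func main() {
	diffID := digest.Canonical.FromString("made-up layer contents") // stand-in for a real diffID
	tocDigest := digest.Canonical.FromString("made-up TOC")         // stand-in for a real TOC digest

	base := exampleLayerID("", diffID.Encoded(), false)              // the traditional bare diffID value
	tocBase := exampleLayerID("", "@TOC="+tocDigest.Encoded(), true) // always hashed, so it is Encoded()-shaped
	child := exampleLayerID(base, diffID.Encoded(), false)           // chained with the parent ID

	fmt.Println(base)
	fmt.Println(tocBase)
	fmt.Println(child)
}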

// createNewLayer creates a new layer newLayerID for (index, trusted) on top of parentLayer (which may be "").
// If the layer cannot be committed yet, the function returns (nil, nil).
func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.Digest, parentLayer, newLayerID string) (*storage.Layer, error) {
func (s *storageImageDestination) createNewLayer(index int, trusted trustedLayerIdentityData, parentLayer, newLayerID string) (*storage.Layer, error) {
s.lock.Lock()
diffOutput, ok := s.lockProtected.diffOutputs[index]
s.lock.Unlock()
if ok {
// If we know a trusted DiffID value (e.g. from a BlobInfoCache), set it in diffOutput.
// Typically, we compute a trusted DiffID value to authenticate the layer contents, see the detailed explanation
// in PutBlobPartial. If the user has opted out of that, but we know a trusted DiffID value
// (e.g. from a BlobInfoCache), set it in diffOutput.
// That way it will be persisted in storage even if the cache is deleted; also
// we can use the value below to avoid the untrustedUncompressedDigest logic (and notably
// the costly commit delay until a manifest is available).
s.lock.Lock()
if d, ok := s.lockProtected.indexToDiffID[index]; ok {
diffOutput.UncompressedDigest = d
// we can use the value below to avoid the untrustedUncompressedDigest logic.
if diffOutput.UncompressedDigest == "" && trusted.diffID != "" {
diffOutput.UncompressedDigest = trusted.diffID
}
s.lock.Unlock()

var untrustedUncompressedDigest digest.Digest
if diffOutput.UncompressedDigest == "" {
d, err := s.untrustedLayerDiffID(index)
if err != nil {
return nil, err
}
if d == "" {
logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID)
return nil, nil
var diffIDUnknownErr untrustedLayerDiffIDUnknownError
switch {
case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID)
return nil, nil
case errors.As(err, &diffIDUnknownErr):
// If untrustedLayerDiffIDUnknownError, the input image is schema1, has no TOC annotations,
// so we should have !trusted.layerIdentifiedByTOC, i.e. we should have set
// diffOutput.UncompressedDigest above in this function, at the very latest.
//
// Or, maybe, the input image is OCI, and has invalid/missing DiffID values in config. In that case
// commitLayer should have already refused this image when dealing with the “view” ambiguity.
return nil, fmt.Errorf("internal error: layer %d for blob %s was partially-pulled with unknown UncompressedDigest, but we don't have a DiffID in config",
index, trusted.logString())
default:
return nil, err
}
}

untrustedUncompressedDigest = d
@ -965,19 +1143,17 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
// then we need to read the desired contents from a layer.
var filename string
var gotFilename bool
s.lock.Lock()
trusted, ok := s.trustedLayerIdentityDataLocked(index, layerDigest)
if ok && trusted.blobDigest != "" {
if trusted.blobDigest != "" {
s.lock.Lock()
filename, gotFilename = s.lockProtected.filenames[trusted.blobDigest]
}
s.lock.Unlock()
if !ok { // We have already determined newLayerID, so the data must have been available.
return nil, fmt.Errorf("internal inconsistency: layer (%d, %q) not found", index, layerDigest)
s.lock.Unlock()
}
var trustedOriginalDigest digest.Digest // For storage.LayerOptions
var trustedOriginalSize *int64
if gotFilename {
// The code setting .filenames[trusted.blobDigest] is responsible for ensuring that the file contents match trusted.blobDigest.
trustedOriginalDigest = trusted.blobDigest
trustedOriginalSize = nil // It’s s.lockProtected.fileSizes[trusted.blobDigest], but we don’t hold the lock now, and the consumer can compute it at trivial cost.
} else {
// Try to find the layer with contents matching the data we use.
var layer *storage.Layer // = nil
@ -997,7 +1173,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
}
}
if layer == nil {
return nil, fmt.Errorf("layer for blob %q/%q/%q not found", trusted.blobDigest, trusted.tocDigest, trusted.diffID)
return nil, fmt.Errorf("layer for blob %s not found", trusted.logString())
}

// Read the layer's contents.
@ -1007,7 +1183,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
}
diff, err2 := s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
if err2 != nil {
return nil, fmt.Errorf("reading layer %q for blob %q/%q/%q: %w", layer.ID, trusted.blobDigest, trusted.tocDigest, trusted.diffID, err2)
return nil, fmt.Errorf("reading layer %q for blob %s: %w", layer.ID, trusted.logString(), err2)
}
// Copy the layer diff to a file. Diff() takes a lock that it holds
// until the ReadCloser that it returns is closed, and PutLayer() wants
@ -1032,22 +1208,36 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
if trusted.diffID == "" && layer.UncompressedDigest != "" {
trusted.diffID = layer.UncompressedDigest // This data might have been unavailable in tryReusingBlobAsPending, and is only known now.
}
// The stream we have is uncompressed, and it matches trusted.diffID (if known).

// Set the layer’s CompressedDigest/CompressedSize to relevant values if known, to allow more layer reuse.
// But we don’t want to just use the size from the manifest if we never saw the compressed blob,
// so that we don’t propagate mistakes / attacks.
//
// FIXME? trustedOriginalDigest could be set to trusted.blobDigest if known, to allow more layer reuse.
// But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created
// layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream).
//
// We can legitimately set storage.LayerOptions.OriginalDigest to "",
// but that would just result in PutLayer computing the digest of the input stream == trusted.diffID.
// So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
trustedOriginalDigest = trusted.diffID
// s.lockProtected.fileSizes[trusted.blobDigest] is not set, otherwise we would have found gotFilename.
// So, check if the layer we found contains that metadata. (If that layer continues to exist, there’s no benefit
// to us propagating the metadata; but that layer could be removed, and in that case propagating the metadata to
// this new layer copy can help.)
if trusted.blobDigest != "" && layer.CompressedDigest == trusted.blobDigest && layer.CompressedSize > 0 {
trustedOriginalDigest = trusted.blobDigest
sizeCopy := layer.CompressedSize
trustedOriginalSize = &sizeCopy
} else {
// The stream we have is uncompressed, and it matches trusted.diffID (if known).
//
// We can legitimately set storage.LayerOptions.OriginalDigest to "",
// but that would just result in PutLayer computing the digest of the input stream == trusted.diffID.
// So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
trustedOriginalDigest = trusted.diffID
trustedOriginalSize = nil // Probably layer.UncompressedSize, but the consumer can compute it at trivial cost.
}

// Allow using the already-collected layer contents without extracting the layer again.
//
// This only matches against the uncompressed digest.
// We don’t have the original compressed data here to trivially set filenames[layerDigest].
// In particular we can’t achieve the correct Layer.CompressedSize value with the current c/storage API.
// If we have trustedOriginalDigest == trusted.blobDigest, we could arrange to reuse the
// same uncompressed stream for future calls of createNewLayer; but for the non-layer blobs (primarily the config),
// we assume that the file at filenames[someDigest] matches someDigest _exactly_; we would need to differentiate
// between “original files” and “possibly uncompressed files”.
// Within-image layer reuse is probably very rare, for now we prefer to avoid that complexity.
if trusted.diffID != "" {
s.lock.Lock()
@ -1067,55 +1257,128 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{
OriginalDigest: trustedOriginalDigest,
OriginalSize: trustedOriginalSize, // nil in many cases
// This might be "" if trusted.layerIdentifiedByTOC; in that case PutLayer will compute the value from the stream.
UncompressedDigest: trusted.diffID,
}, file)
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
return nil, fmt.Errorf("adding layer with blob %q/%q/%q: %w", trusted.blobDigest, trusted.tocDigest, trusted.diffID, err)
return nil, fmt.Errorf("adding layer with blob %s: %w", trusted.logString(), err)
}
return layer, nil
}

// untrustedLayerDiffID returns a DiffID value for layerIndex from the image’s config.
// If the value is not yet available (but it can be available after s.manifets is set), it returns ("", nil).
// WARNING: We don’t validate the DiffID value against the layer contents; it must not be used for any deduplication.
func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) {
// At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob, and
// nothing is writing to s.manifest yet, or PutManifest has been called and s.manifest != nil.
// Either way this function does not need the protection of s.lock.
if s.manifest == nil {
return "", nil
// uncommittedImageSource allows accessing an image’s metadata (not layers) before it has been committed,
// to allow using image.FromUnparsedImage.
type uncommittedImageSource struct {
srcImpl.Compat
srcImpl.PropertyMethodsInitialize
srcImpl.NoSignatures
srcImpl.DoesNotAffectLayerInfosForCopy
srcStubs.NoGetBlobAtInitialize

d *storageImageDestination
}

func newUncommittedImageSource(d *storageImageDestination) *uncommittedImageSource {
s := &uncommittedImageSource{
PropertyMethodsInitialize: srcImpl.PropertyMethods(srcImpl.Properties{
HasThreadSafeGetBlob: true,
}),
NoGetBlobAtInitialize: srcStubs.NoGetBlobAt(d.Reference()),

d: d,
}
s.Compat = srcImpl.AddCompat(s)
return s
}

func (u *uncommittedImageSource) Reference() types.ImageReference {
return u.d.Reference()
}

func (u *uncommittedImageSource) Close() error {
return nil
}

func (u *uncommittedImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
return u.d.manifest, u.d.manifestMIMEType, nil
}

func (u *uncommittedImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
blob, err := u.d.getConfigBlob(info)
if err != nil {
return nil, -1, err
}
return io.NopCloser(bytes.NewReader(blob)), int64(len(blob)), nil
}

// errUntrustedLayerDiffIDNotYetAvailable is returned by untrustedLayerDiffID
// if the value is not yet available (but it can be available after s.manifests is set).
// This should only happen for external callers of the transport, not for c/image/copy.
//
// Callers of untrustedLayerDiffID before PutManifest must handle this error specially;
// callers after PutManifest can use the default, reporting an internal error.
var errUntrustedLayerDiffIDNotYetAvailable = errors.New("internal error: untrustedLayerDiffID has no value available and fallback was not implemented")

// untrustedLayerDiffIDUnknownError is returned by untrustedLayerDiffID
// if the image’s format does not provide DiffIDs.
type untrustedLayerDiffIDUnknownError struct {
layerIndex int
}

func (e untrustedLayerDiffIDUnknownError) Error() string {
return fmt.Sprintf("DiffID value for layer %d is unknown or explicitly empty", e.layerIndex)
}
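The intended caller-side handling of these two error kinds follows the usual errors.Is / errors.As pattern. The following standalone sketch uses hypothetical stand-in definitions (errNotYetAvailable, diffIDUnknownError) rather than the package-private ones above.

package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins mirroring the shape of the two error kinds defined above.
var errNotYetAvailable = errors.New("diffID not yet available")

type diffIDUnknownError struct{ layerIndex int }

func (e diffIDUnknownError) Error() string {
	return fmt.Sprintf("DiffID value for layer %d is unknown", e.layerIndex)
}

// classify shows the intended branching: errors.Is for the sentinel, errors.As for the typed error.
func classify(err error) string {
	var unknown diffIDUnknownError
	switch {
	case err == nil:
		return "diffID available"
	case errors.Is(err, errNotYetAvailable):
		return "retry once the manifest or original OCI config is known"
	case errors.As(err, &unknown):
		return fmt.Sprintf("format provides no DiffID for layer %d", unknown.layerIndex)
	default:
		return "unexpected failure: " + err.Error()
	}
}

func main() {
	fmt.Println(classify(nil))
	fmt.Println(classify(errNotYetAvailable))
	fmt.Println(classify(diffIDUnknownError{layerIndex: 2}))
}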

// untrustedLayerDiffID returns a DiffID value for layerIndex from the image’s config.
// It may return two special errors, errUntrustedLayerDiffIDNotYetAvailable or untrustedLayerDiffIDUnknownError.
//
// WARNING: This function does not even validate that the returned digest has a valid format.
// WARNING: We don’t _always_ validate this DiffID value against the layer contents; it must not be used for any deduplication.
func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) {
// At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob,
// nothing is writing to s.manifest yet, and s.untrustedDiffIDValues might have been set
// by NoteOriginalOCIConfig and are not being updated any more;
// or PutManifest has been called and s.manifest != nil.
// Either way this function does not need the protection of s.lock.

if s.untrustedDiffIDValues == nil {
mt := manifest.GuessMIMEType(s.manifest)
if mt != imgspecv1.MediaTypeImageManifest {
// We could, in principle, build an ImageSource, support arbitrary image formats using image.FromUnparsedImage,
// and then use types.Image.OCIConfig so that we can parse the image.
//
// In practice, this should, right now, only matter for pulls of OCI images (this code path implies that a layer has annotation),
// while converting to a non-OCI formats, using a manual (skopeo copy) or something similar, not (podman pull).
// So it is not implemented yet.
return "", fmt.Errorf("determining DiffID for manifest type %q is not yet supported", mt)
}
man, err := manifest.FromBlob(s.manifest, mt)
if err != nil {
return "", fmt.Errorf("parsing manifest: %w", err)
// Typically, we expect untrustedDiffIDValues to be set by the generic copy code
// via NoteOriginalOCIConfig; this is a compatibility fallback for external callers
// of the public types.ImageDestination.
if s.manifest == nil {
return "", errUntrustedLayerDiffIDNotYetAvailable
}

cb, err := s.getConfigBlob(man.ConfigInfo())
ctx := context.Background() // This is all happening in memory, no need to worry about cancellation.
unparsed := image.UnparsedInstance(newUncommittedImageSource(s), nil)
sourced, err := image.FromUnparsedImage(ctx, nil, unparsed)
if err != nil {
return "", err
return "", fmt.Errorf("parsing image to be committed: %w", err)
}
configOCI, err := sourced.OCIConfig(ctx)
if err != nil {
return "", fmt.Errorf("obtaining config of image to be committed: %w", err)
}

// retrieve the expected uncompressed digest from the config blob.
configOCI := &imgspecv1.Image{}
if err := json.Unmarshal(cb, configOCI); err != nil {
return "", err
}
s.untrustedDiffIDValues = slices.Clone(configOCI.RootFS.DiffIDs)
if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory…
s.untrustedDiffIDValues = []digest.Digest{}
s.setUntrustedDiffIDValuesFromOCIConfig(configOCI)
}

// Let entirely empty / missing diffIDs through; but if the array does exist, expect it to contain an entry for every layer,
// and fail hard on missing entries. This tries to account for completely naive image producers who just don’t fill DiffID,
// while still detecting incorrectly-built / confused images.
//
// schema1 images don’t have DiffID values in the config.
// Our schema1.OCIConfig code produces non-empty DiffID arrays of empty values, so treat arrays of all-empty
// values as “DiffID unknown”.
// For schema 1, it is important to exit here, before the layerIndex >= len(s.untrustedDiffIDValues)
// check, because the format conversion from schema1 to OCI used to compute untrustedDiffIDValues
// changes the number of layres (drops items with Schema1V1Compatibility.ThrowAway).
if !slices.ContainsFunc(s.untrustedDiffIDValues, func(d digest.Digest) bool {
return d != ""
}) {
return "", untrustedLayerDiffIDUnknownError{
layerIndex: layerIndex,
}
}
if layerIndex >= len(s.untrustedDiffIDValues) {
@ -1124,6 +1387,15 @@ func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.D
return s.untrustedDiffIDValues[layerIndex], nil
}

// setUntrustedDiffIDValuesFromOCIConfig updates s.untrustedDiffIDvalues from config.
// The caller must ensure s.lock does not need to be held.
func (s *storageImageDestination) setUntrustedDiffIDValuesFromOCIConfig(config *imgspecv1.Image) {
s.untrustedDiffIDValues = slices.Clone(config.RootFS.DiffIDs)
if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory…
s.untrustedDiffIDValues = []digest.Digest{}
}
}
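The DiffID values being cloned here come from the rootfs.diff_ids array of the OCI config. A minimal standalone sketch, with a made-up single-layer config (the digest is just the well-known SHA-256 of the empty string), looks like this:

package main

import (
	"encoding/json"
	"fmt"

	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// A made-up, minimal OCI config fragment; a real config carries one diff_id per layer,
	// in the same order as the manifest's layers.
	raw := []byte(`{"rootfs":{"type":"layers","diff_ids":["sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"]}}`)

	var config imgspecv1.Image
	if err := json.Unmarshal(raw, &config); err != nil {
		panic(err)
	}
	// These are the untrusted per-layer DiffID values that setUntrustedDiffIDValuesFromOCIConfig clones.
	fmt.Println(config.RootFS.DiffIDs)
}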

// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before CommitWithOptions() is called
@ -1131,7 +1403,7 @@ func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.D
func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
// This function is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock.

if len(s.manifest) == 0 {
if s.manifest == nil {
return errors.New("Internal error: storageImageDestination.CommitWithOptions() called without PutManifest()")
}
toplevelManifest, _, err := options.UnparsedToplevel.Manifest(ctx)
@ -1159,7 +1431,7 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
}
}
// Find the list of layer blobs.
man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
man, err := manifest.FromBlob(s.manifest, s.manifestMIMEType)
if err != nil {
return fmt.Errorf("parsing manifest: %w", err)
}
@ -1193,29 +1465,21 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
imgOptions.CreationDate = *inspect.Created
}

// Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so
// we just need to screen out the ones that are actually layers to get the list of non-layers.
dataBlobs := set.New[digest.Digest]()
for blob := range s.lockProtected.filenames {
dataBlobs.Add(blob)
}
for _, layerBlob := range layerBlobs {
dataBlobs.Delete(layerBlob.Digest)
}
for _, blob := range dataBlobs.Values() {
v, err := os.ReadFile(s.lockProtected.filenames[blob])
// Set up to save the config as a data item. Since we only share layers, the config should be in a file.
if s.lockProtected.configDigest != "" {
v, err := os.ReadFile(s.lockProtected.filenames[s.lockProtected.configDigest])
if err != nil {
return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
return fmt.Errorf("copying config blob %q to image: %w", s.lockProtected.configDigest, err)
}
imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
Key: blob.String(),
Key: s.lockProtected.configDigest.String(),
Data: v,
Digest: digest.Canonical.FromBytes(v),
})
}
// Set up to save the options.UnparsedToplevel's manifest if it differs from
// the per-platform one, which is saved below.
if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) {
if !bytes.Equal(toplevelManifest, s.manifest) {
manifestDigest, err := manifest.Digest(toplevelManifest)
if err != nil {
return fmt.Errorf("digesting top-level manifest: %w", err)
@ -1370,6 +1634,10 @@ func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob
return err
}
s.manifest = bytes.Clone(manifestBlob)
if s.manifest == nil { // Make sure PutManifest can never succeed with s.manifest == nil
s.manifest = []byte{}
}
s.manifestMIMEType = manifest.GuessMIMEType(s.manifest)
s.manifestDigest = digest
return nil
}
@ -1392,7 +1660,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s
if instanceDigest == nil {
s.signatures = sigblob
s.metadata.SignatureSizes = sizes
if len(s.manifest) > 0 {
if s.manifest != nil {
manifestDigest := s.manifestDigest
instanceDigest = &manifestDigest
}
4 vendor/github.com/containers/image/v5/storage/storage_reference.go (generated, vendored)
@ -153,7 +153,9 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag
}
if s.id == "" {
logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport())
return nil, fmt.Errorf("reference %q does not resolve to an image ID: %w", s.StringWithinTransport(), ErrNoSuchImage)
// %.0w makes the error visible to error.Unwrap() without including any text.
// ErrNoSuchImage ultimately is “identifier is not an image”, which is not helpful for identifying the root cause.
return nil, fmt.Errorf("reference %q does not resolve to an image ID%.0w", s.StringWithinTransport(), ErrNoSuchImage)
}
if loadedImage == nil {
img, err := s.transport.store.Image(s.id)
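The %.0w formatting relies on standard fmt behavior: a zero precision suppresses the wrapped error's text while the error remains wrapped for errors.Is / errors.Unwrap. A standalone sketch with a stand-in sentinel (errNoSuchImage here is not the real storage.ErrNoSuchImage):

package main

import (
	"errors"
	"fmt"
)

// errNoSuchImage is a made-up stand-in; the real code wraps storage.ErrNoSuchImage.
var errNoSuchImage = errors.New("identifier is not an image")

func main() {
	// %.0w wraps the error for errors.Is / errors.Unwrap, but the zero precision
	// keeps the wrapped error's unhelpful text out of the user-visible message.
	err := fmt.Errorf("reference %q does not resolve to an image ID%.0w", "example-ref", errNoSuchImage)
	fmt.Println(err)                            // the message does not repeat the sentinel's text
	fmt.Println(errors.Is(err, errNoSuchImage)) // true
}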
22 vendor/github.com/containers/image/v5/storage/storage_src.go (generated, vendored)
@ -35,13 +35,14 @@ type storageImageSource struct {
impl.PropertyMethodsInitialize
stubs.NoGetBlobAtInitialize

imageRef storageReference
image *storage.Image
systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
metadata storageImageMetadata
cachedManifest []byte // A cached copy of the manifest, if already known, or nil
getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
getBlobMutexProtected getBlobMutexProtected
imageRef storageReference
image *storage.Image
systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
metadata storageImageMetadata
cachedManifest []byte // A cached copy of the manifest, if already known, or nil
cachedManifestMIMEType string // Valid if cachedManifest != nil
getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
getBlobMutexProtected getBlobMutexProtected
}

// getBlobMutexProtected contains storageImageSource data protected by getBlobMutex.
@ -247,7 +248,7 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
}
return blob, manifest.GuessMIMEType(blob), err
}
if len(s.cachedManifest) == 0 {
if s.cachedManifest == nil {
// The manifest is stored as a big data item.
// Prefer the manifest corresponding to the user-specified digest, if available.
if s.imageRef.named != nil {
@ -267,15 +268,16 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
}
// If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
// Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
if len(s.cachedManifest) == 0 {
if s.cachedManifest == nil {
cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
if err != nil {
return nil, "", err
}
s.cachedManifest = cachedBlob
}
s.cachedManifestMIMEType = manifest.GuessMIMEType(s.cachedManifest)
}
return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
return s.cachedManifest, s.cachedManifestMIMEType, err
}

// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
61 vendor/github.com/containers/image/v5/tarball/tarball_src.go (generated, vendored)
@ -14,8 +14,9 @@ import (

"github.com/containers/image/v5/internal/imagesource/impl"
"github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/pkg/compression"
compressionTypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
imgspecs "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@ -82,31 +83,47 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
}
}

// Default to assuming the layer is compressed.
layerType := imgspecv1.MediaTypeImageLayerGzip

// Set up to digest the file as it is.
blobIDdigester := digest.Canonical.Digester()
reader = io.TeeReader(reader, blobIDdigester.Hash())

// Set up to digest the file after we maybe decompress it.
diffIDdigester := digest.Canonical.Digester()
uncompressed, err := pgzip.NewReader(reader)
if err == nil {
// It is compressed, so the diffID is the digest of the uncompressed version
reader = io.TeeReader(uncompressed, diffIDdigester.Hash())
} else {
// It is not compressed, so the diffID and the blobID are going to be the same
diffIDdigester = blobIDdigester
layerType = imgspecv1.MediaTypeImageLayer
uncompressed = nil
}
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
if _, err := io.Copy(io.Discard, reader); err != nil {
return nil, fmt.Errorf("error reading %q: %w", filename, err)
}
if uncompressed != nil {
uncompressed.Close()
var layerType string
var diffIDdigester digest.Digester
// If necessary, digest the file after we decompress it.
if err := func() error { // A scope for defer
format, decompressor, reader, err := compression.DetectCompressionFormat(reader)
if err != nil {
return err
}
if decompressor != nil {
uncompressed, err := decompressor(reader)
if err != nil {
return err
}
defer uncompressed.Close()
// It is compressed, so the diffID is the digest of the uncompressed version
diffIDdigester = digest.Canonical.Digester()
reader = io.TeeReader(uncompressed, diffIDdigester.Hash())
switch format.Name() {
case compressionTypes.GzipAlgorithmName:
layerType = imgspecv1.MediaTypeImageLayerGzip
case compressionTypes.ZstdAlgorithmName:
layerType = imgspecv1.MediaTypeImageLayerZstd
default: // This is incorrect, but we have no good options, and it is what this transport was historically doing.
layerType = imgspecv1.MediaTypeImageLayerGzip
}
} else {
// It is not compressed, so the diffID and the blobID are going to be the same
diffIDdigester = blobIDdigester
layerType = imgspecv1.MediaTypeImageLayer
}
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
if _, err := io.Copy(io.Discard, reader); err != nil {
return fmt.Errorf("error reading %q: %w", filename, err)
}
return nil
}(); err != nil {
return nil, err
}

// Grab our uncompressed and possibly-compressed digests and sizes.
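The same detection logic can be exercised in isolation. This standalone sketch feeds a small in-memory gzip blob through compression.DetectCompressionFormat and maps the detected format to an OCI layer media type, mirroring the hunk above; the payload bytes are made up.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"

	"github.com/containers/image/v5/pkg/compression"
	compressionTypes "github.com/containers/image/v5/pkg/compression/types"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Build a small gzip-compressed payload in memory (made-up contents).
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("example layer contents")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	format, decompressor, reader, err := compression.DetectCompressionFormat(&buf)
	if err != nil {
		panic(err)
	}
	layerType := imgspecv1.MediaTypeImageLayer // default if the stream is not compressed
	if decompressor != nil {
		switch format.Name() {
		case compressionTypes.GzipAlgorithmName:
			layerType = imgspecv1.MediaTypeImageLayerGzip
		case compressionTypes.ZstdAlgorithmName:
			layerType = imgspecv1.MediaTypeImageLayerZstd
		}
		uncompressed, err := decompressor(reader)
		if err != nil {
			panic(err)
		}
		defer uncompressed.Close()
		reader = uncompressed
	}
	contents, err := io.ReadAll(reader)
	if err != nil {
		panic(err)
	}
	fmt.Println(layerType, string(contents))
}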
4 vendor/github.com/containers/image/v5/version/version.go (generated, vendored)
@ -6,9 +6,9 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 33
VersionMinor = 34
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
VersionPatch = 3

// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
2 vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go (generated, vendored)
@ -25,7 +25,7 @@ import (
"github.com/containers/ocicrypt/config"
"github.com/containers/ocicrypt/keywrap"
"github.com/containers/ocicrypt/utils"
"go.mozilla.org/pkcs7"
"github.com/smallstep/pkcs7"
)

type pkcs7KeyWrapper struct {
Some files were not shown because too many files have changed in this diff.