Compare commits


1 commit

Author: Tom Sweeney
SHA1: 8b9999e1d5
Date: 2023-07-05 16:16:38 -04:00

Bump to v1.13.0

As the title says. In preparation for RHEL 8.9/9.3.

[NO NEW TESTS NEEDED]

Signed-off-by: Tom Sweeney <tsweeney@redhat.com>
1089 changed files with 41967 additions and 315371 deletions

View File

@@ -20,8 +20,13 @@ env:
# Save a little typing (path relative to $CIRRUS_WORKING_DIR)
SCRIPT_BASE: "./contrib/cirrus"
####
#### Cache-image names to test with (double-quotes around names are critical)
####
FEDORA_NAME: "fedora-38"
# Google-cloud VM Images
IMAGE_SUFFIX: "c20231116t174419z-f39f38d13"
IMAGE_SUFFIX: "c20230614t132754z-f38f37d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
# Container FQIN's
@@ -72,13 +77,14 @@ doccheck_task:
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" doccheck
osx_task:
# Don't run for docs-only builds.
# Don't run for docs-only or multi-arch image builds.
# Also don't run on release-branches or their PRs,
# since base container-image is not version-constrained.
only_if: &not_docs_or_release_branch >-
($CIRRUS_BASE_BRANCH == $CIRRUS_DEFAULT_BRANCH ||
$CIRRUS_BRANCH == $CIRRUS_DEFAULT_BRANCH ) &&
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
$CIRRUS_CRON != 'multiarch'
depends_on:
- validate
macos_instance:
@@ -100,7 +106,8 @@ osx_task:
cross_task:
alias: cross
only_if: >-
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
$CIRRUS_CRON != 'multiarch'
depends_on:
- validate
gce_instance: &standardvm
@@ -163,10 +170,11 @@ ostree-rs-ext_task:
#####
test_skopeo_task:
alias: test_skopeo
# Don't test for [CI:DOCS], [CI:BUILD].
# Don't test for [CI:DOCS], [CI:BUILD], or 'multiarch' cron.
only_if: >-
$CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' &&
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
$CIRRUS_CRON != 'multiarch'
depends_on:
- validate
gce_instance:
@@ -199,6 +207,49 @@ test_skopeo_task:
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" system
image_build_task: &image-build
name: "Build multi-arch $CTXDIR"
alias: image_build
# Some of these container images take > 1h to build, limit
# this task to a specific Cirrus-Cron entry with this name.
only_if: $CIRRUS_CRON == 'multiarch'
timeout_in: 120m # emulation is sssllllooooowwww
gce_instance:
<<: *standardvm
image_name: build-push-${IMAGE_SUFFIX}
# More muscle required for parallel multi-arch build
type: "n2-standard-4"
matrix:
- env:
CTXDIR: contrib/skopeoimage/upstream
- env:
CTXDIR: contrib/skopeoimage/testing
- env:
CTXDIR: contrib/skopeoimage/stable
env:
SKOPEO_USERNAME: ENCRYPTED[4195884d23b154553f2ddb26a63fc9fbca50ba77b3e447e4da685d8639ed9bc94b9a86a9c77272c8c80d32ead9ca48da]
SKOPEO_PASSWORD: ENCRYPTED[36e06f9befd17e5da2d60260edb9ef0d40e6312e2bba4cf881d383f1b8b5a18c8e5a553aea2fdebf39cebc6bd3b3f9de]
CONTAINERS_USERNAME: ENCRYPTED[dd722c734641f103b394a3a834d51ca5415347e378637cf98ee1f99e64aad2ec3dbd4664c0d94cb0e06b83d89e9bbe91]
CONTAINERS_PASSWORD: ENCRYPTED[d8b0fac87fe251cedd26c864ba800480f9e0570440b9eb264265b67411b253a626fb69d519e188e6c9a7f525860ddb26]
main_script:
- source /etc/automation_environment
- main.sh $CIRRUS_REPO_CLONE_URL $CTXDIR
test_image_build_task:
<<: *image-build
alias: test_image_build
# Allow this to run inside a PR w/ [CI:BUILD] only.
only_if: $CIRRUS_PR != '' && $CIRRUS_CHANGE_TITLE =~ '.*CI:BUILD.*'
# This takes a LONG time, only run when requested. N/B: Any task
# made to depend on this one will block FOREVER unless triggered.
# DO NOT ADD THIS TASK AS DEPENDENCY FOR `success_task`.
trigger_type: manual
# Overwrite all 'env', don't push anything, just do the build.
env:
DRYRUN: 1
# This task is critical. It updates the "last-used by" timestamp stored
# in metadata for all VM images. This mechanism functions in tandem with
# an out-of-band pruning operation to remove disused VM images.
@@ -237,6 +288,7 @@ success_task:
- cross
- proxy_ostree_ext
- test_skopeo
- image_build
- meta
container: *smallcontainer
env:

View File

@@ -1,20 +0,0 @@
---
# See also:
# https://github.com/containers/podman/blob/main/.github/workflows/discussion_lock.yml
on:
schedule:
- cron: '0 0 * * *'
# Debug: Allow triggering job manually in github-actions WebUI
workflow_dispatch: {}
jobs:
# Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
closed_issue_discussion_lock:
uses: containers/podman/.github/workflows/discussion_lock.yml@main
secrets: inherit
permissions:
contents: read
issues: write
pull-requests: write

View File

@@ -1,3 +0,0 @@
---
run:
timeout: 5m

View File

@@ -6,47 +6,38 @@
# supported Fedora and CentOS Stream arches.
# They do not block the current Cirrus-based workflow.
# Build targets can be found at:
# https://copr.fedorainfracloud.org/coprs/rhcontainerbot/packit-builds/
# and
# https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/
specfile_path: rpm/skopeo.spec
upstream_tag_template: v{version}
srpm_build_deps:
- make
jobs:
- job: copr_build
- &copr
job: copr_build
trigger: pull_request
notifications:
failure_comment:
message: "Ephemeral COPR build failed. @containers/packit-build please check."
enable_net: true
targets:
- fedora-all-x86_64
- fedora-all-aarch64
- fedora-eln-x86_64
- fedora-eln-aarch64
- centos-stream+epel-next-8-x86_64
- centos-stream+epel-next-8-aarch64
- centos-stream+epel-next-9-x86_64
- centos-stream+epel-next-9-aarch64
additional_repos:
- "copr://rhcontainerbot/podman-next"
# Run on commit to main branch
- job: copr_build
trigger: commit
notifications:
failure_comment:
message: "podman-next COPR build failed. @containers/packit-build please check."
branch: main
owner: rhcontainerbot
project: podman-next
project: packit-builds
enable_net: true
srpm_build_deps:
- make
- <<: *copr
# Run on commit to main branch
trigger: commit
branch: main
project: podman-next
- job: propose_downstream
trigger: release
update_release: false
dist_git_branches:
- fedora-all
actions:
pre-sync:
- "bash rpm/update-spec-provides.sh"
- job: koji_build
trigger: commit

View File

@@ -27,7 +27,7 @@ GOARCH ?= $(shell go env GOARCH)
# N/B: This value is managed by Renovate, manual changes are
# possible, as long as they don't disturb the formatting
# (i.e. DO NOT ADD A 'v' prefix!)
GOLANGCI_LINT_VERSION := 1.55.2
GOLANGCI_LINT_VERSION := 1.53.3
ifeq ($(GOBIN),)
GOBIN := $(GOPATH)/bin
@@ -117,7 +117,6 @@ help:
@echo " * 'install' - Install binaries and documents to system locations"
@echo " * 'binary' - Build skopeo with a container"
@echo " * 'bin/skopeo' - Build skopeo locally"
@echo " * 'bin/skopeo.OS.ARCH' - Build skopeo for specific OS and ARCH"
@echo " * 'test-unit' - Execute unit tests"
@echo " * 'test-integration' - Execute integration tests"
@echo " * 'validate' - Verify whether there is no conflict and all Go source files have been formatted, linted and vetted"

View File

@@ -1,18 +0,0 @@
package main
import (
"path/filepath"
"testing"
)
func TestLogin(t *testing.T) {
dir := t.TempDir()
authFile := filepath.Join(dir, "auth.json")
compatAuthFile := filepath.Join(dir, "config.json")
// Just a trivial smoke-test exercising one error-handling path.
// We can't test full operation without a registry, unit tests should mostly
// exist in c/common/pkg/auth, not here.
out, err := runSkopeo("login", "--authfile", authFile, "--compat-auth-file", compatAuthFile, "example.com")
assertTestFailed(t, out, err, "options for paths to the credential file and to the Docker-compatible credential file can not be set simultaneously")
}

View File

@@ -1,25 +0,0 @@
package main
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
)
func TestLogout(t *testing.T) {
dir := t.TempDir()
authFile := filepath.Join(dir, "auth.json")
compatAuthFile := filepath.Join(dir, "config.json")
// Just a trivial smoke-test exercising one error-handling path.
// We can't test full operation without a registry, unit tests should mostly
// exist in c/common/pkg/auth, not here.
err := os.WriteFile(authFile, []byte("{}"), 0o700)
require.NoError(t, err)
err = os.WriteFile(compatAuthFile, []byte("{}"), 0o700)
require.NoError(t, err)
out, err := runSkopeo("logout", "--authfile", authFile, "--compat-auth-file", compatAuthFile, "example.com")
assertTestFailed(t, out, err, "options for paths to the credential file and to the Docker-compatible credential file can not be set simultaneously")
}
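
The two test files above (removed in this comparison, per their "+0,0" hunk headers) only exercise the error path where --authfile and --compat-auth-file are supplied together; per their comments, most of the real unit coverage for this belongs in c/common/pkg/auth rather than in skopeo itself. As a purely hypothetical sketch of that kind of mutual-exclusion check — not skopeo's actual implementation — cobra (already a dependency in go.mod) can enforce the conflict declaratively:

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

// newLoginCmd is a hypothetical, self-contained stand-in for a login command
// with two credential-file flags that must not be combined.
func newLoginCmd() *cobra.Command {
    cmd := &cobra.Command{
        Use:  "login REGISTRY",
        Args: cobra.ExactArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            fmt.Println("would log in to", args[0])
            return nil
        },
    }
    cmd.Flags().String("authfile", "", "path of the authentication file")
    cmd.Flags().String("compat-auth-file", "", "path of a Docker-compatible credential file")
    // Reject invocations that set both paths at once.
    cmd.MarkFlagsMutuallyExclusive("authfile", "compat-auth-file")
    return cmd
}

func main() {
    cmd := newLoginCmd()
    cmd.SetArgs([]string{"--authfile", "auth.json", "--compat-auth-file", "config.json", "example.com"})
    if err := cmd.Execute(); err != nil {
        fmt.Println("error:", err) // cobra reports the flag conflict before RunE runs
    }
}

Running the sketch with both flags set makes Execute return a conflict error before RunE is reached — the same class of failure the removed tests asserted through assertTestFailed, although skopeo's own error message differs.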

View File

@@ -75,6 +75,7 @@ import (
"github.com/containers/image/v5/manifest"
ocilayout "github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/pkg/blobinfocache"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
@@ -155,7 +156,7 @@ type activePipe struct {
// openImage is an opened image reference
type openImage struct {
// id is an opaque integer handle
id uint64
id uint32
src types.ImageSource
cachedimg types.Image
}
@@ -170,9 +171,9 @@ type proxyHandler struct {
cache types.BlobInfoCache
// imageSerial is a counter for open images
imageSerial uint64
imageSerial uint32
// images holds our opened images
images map[uint64]*openImage
images map[uint32]*openImage
// activePipes maps from "pipeid" to a pipe + goroutine pair
activePipes map[uint32]*activePipe
}
@@ -238,7 +239,7 @@ func isNotFoundImageError(err error) bool {
errors.Is(err, ocilayout.ImageNotFoundError{})
}
func (h *proxyHandler) openImageImpl(args []any, allowNotFound bool) (retReplyBuf replyBuf, retErr error) {
func (h *proxyHandler) openImageImpl(args []any, allowNotFound bool) (replyBuf, error) {
h.lock.Lock()
defer h.lock.Unlock()
var ret replyBuf
@@ -267,23 +268,21 @@ func (h *proxyHandler) openImageImpl(args []any, allowNotFound bool) (retReplyBu
return ret, err
}
policyContext, err := h.opts.global.getPolicyContext()
if err != nil {
return ret, err
}
defer func() {
if err := policyContext.Destroy(); err != nil {
retErr = noteCloseFailure(retErr, "tearing down policy context", err)
}
}()
unparsedTopLevel := image.UnparsedInstance(imgsrc, nil)
allowed, err := policyContext.IsRunningImageAllowed(context.Background(), unparsedTopLevel)
policy, err := signature.DefaultPolicy(h.sysctx)
if err != nil {
return ret, err
}
if !allowed {
return ret, fmt.Errorf("internal inconsistency: policy verification failed without returning an error")
policyContext, err := signature.NewPolicyContext(policy)
if err != nil {
return ret, err
}
allowed, err := policyContext.IsRunningImageAllowed(context.Background(), unparsedTopLevel)
if !allowed || err != nil {
return ret, err
}
if !allowed && err == nil {
return ret, fmt.Errorf("policy verification failed unexpectedly")
}
// Note that we never return zero as an imageid; this code doesn't yet
@@ -327,6 +326,14 @@ func (h *proxyHandler) CloseImage(args []any) (replyBuf, error) {
return ret, nil
}
func parseImageID(v any) (uint32, error) {
imgidf, ok := v.(float64)
if !ok {
return 0, fmt.Errorf("expecting integer imageid, not %T", v)
}
return uint32(imgidf), nil
}
// parseUint64 validates that a number fits inside a JavaScript safe integer
func parseUint64(v any) (uint64, error) {
f, ok := v.(float64)
@@ -340,7 +347,7 @@ func parseUint64(v any) (uint64, error) {
}
func (h *proxyHandler) parseImageFromID(v any) (*openImage, error) {
imgid, err := parseUint64(v)
imgid, err := parseImageID(v)
if err != nil {
return nil, err
}
@@ -840,7 +847,7 @@ func (h *proxyHandler) processRequest(readBytes []byte) (rb replyBuf, terminate
func (opts *proxyOptions) run(args []string, stdout io.Writer) error {
handler := &proxyHandler{
opts: opts,
images: make(map[uint64]*openImage),
images: make(map[uint32]*openImage),
activePipes: make(map[uint32]*activePipe),
}
defer handler.close()
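
Both sides of the proxy-handler hunks above parse image handles out of a decoded JSON request — one via the generic parseUint64, the other via a dedicated parseImageID returning uint32. The detail that makes either helper necessary is that encoding/json decodes every JSON number into a float64 when the destination type is `any`, so the handle has to be type-asserted as a float64 and converted back to an integer. Below is a minimal standalone sketch of that conversion; the bounds/integrality check is an extra precaution added here, not something present in the diff.

package main

import (
    "encoding/json"
    "fmt"
    "math"
)

// parseHandle mirrors the idea behind parseImageID: JSON numbers reach Go as
// float64 when unmarshaled into `any`, and must be converted to the integer
// handle type explicitly. The range check is an added precaution.
func parseHandle(v any) (uint32, error) {
    f, ok := v.(float64)
    if !ok {
        return 0, fmt.Errorf("expecting integer imageid, not %T", v)
    }
    if f < 0 || f > math.MaxUint32 || f != math.Trunc(f) {
        return 0, fmt.Errorf("imageid %v does not fit in uint32", f)
    }
    return uint32(f), nil
}

func main() {
    // Simulate the args array of a decoded proxy request, e.g. {"args": [3]}.
    var args []any
    if err := json.Unmarshal([]byte(`[3]`), &args); err != nil {
        panic(err)
    }
    id, err := parseHandle(args[0])
    fmt.Println(id, err) // 3 <nil>
}

The comment retained in the hunk — parseUint64 "validates that a number fits inside a JavaScript safe integer" — is the same constraint seen from the other side: once a value has passed through a float64, integers are only represented exactly up to 2^53.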

View File

@@ -25,7 +25,9 @@ the images live are public and can be pulled without credentials. These contain
resulting containers can run safely with privileges within the container.
The container images are built using the latest Fedora and then Skopeo is installed into them.
The ENTRYPOINT of the container is set to execute the `skopeo` binary.
The PATH in the container images is set to the default PATH provided by Fedora. Also, the
ENTRYPOINT and the WORKDIR variables are not set within these container images, as such they
default to `/`.
The container images are:

View File

@@ -182,7 +182,7 @@ Existing signatures, if any, are preserved as well.
**--dest-compress-format** _format_
Specifies the compression format to use. Supported values are: `gzip`, `zstd` and `zstd:chunked`.
Specifies the compression format to use. Supported values are: `gzip` and `zstd`.
**--dest-compress-level** _format_

View File

@@ -36,10 +36,6 @@ Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth
Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
environment variable. `export REGISTRY_AUTH_FILE=path`
**--compat-auth-file**=*path*
Instead of updating the default credentials file, update the one at *path*, and use a Docker-compatible format.
**--get-login**
Return the logged-in user for the registry. Return error if no login is found.

View File

@@ -23,10 +23,6 @@ Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth
Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
environment variable. `export REGISTRY_AUTH_FILE=path`
**--compat-auth-file**=*path*
Instead of updating the default credentials file, update the one at *path*, and use a Docker-compatible format.
**--all**, **-a**
Remove the cached credentials for all registries in the auth file

View File

@@ -121,7 +121,7 @@ Print the version number
**/etc/containers/registries.d**
Default directory containing registry configuration, if **--registries.d** is not specified.
The contents of this directory are documented in [containers-registries.d(5)](https://github.com/containers/image/blob/main/docs/containers-registries.d.5.md).
The contents of this directory are documented in [containers-policy.json(5)](https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md).
## SEE ALSO
skopeo-login(1), docker-login(1), containers-auth.json(5), containers-storage.conf(5), containers-policy.json(5), containers-transports(5)

go.mod (111 changed lines)
View File

@@ -1,23 +1,23 @@
module github.com/containers/skopeo
go 1.19
go 1.18
require (
github.com/containers/common v0.57.0
github.com/containers/image/v5 v5.29.0
github.com/containers/ocicrypt v1.1.9
github.com/containers/storage v1.51.0
github.com/docker/distribution v2.8.3+incompatible
github.com/containers/common v0.55.1
github.com/containers/image/v5 v5.26.1
github.com/containers/ocicrypt v1.1.7
github.com/containers/storage v1.48.0
github.com/docker/distribution v2.8.2+incompatible
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.0-rc5
github.com/opencontainers/image-spec v1.1.0-rc4
github.com/opencontainers/image-tools v1.0.0-rc3
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.8.0
github.com/spf13/cobra v1.7.0
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.8.4
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
golang.org/x/exp v0.0.0-20231006140011-7918f672742d
golang.org/x/term v0.14.0
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
golang.org/x/term v0.9.0
gopkg.in/yaml.v3 v3.0.1
)
@@ -25,31 +25,30 @@ require (
dario.cat/mergo v1.0.0 // indirect
github.com/BurntSushi/toml v1.3.2 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Microsoft/hcsshim v0.12.0-rc.1 // indirect
github.com/Microsoft/hcsshim v0.10.0-rc.8 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/containerd/cgroups/v3 v3.0.2 // indirect
github.com/containerd/containerd v1.7.9 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
github.com/containerd/containerd v1.7.2 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
github.com/coreos/go-oidc/v3 v3.7.0 // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
github.com/coreos/go-oidc/v3 v3.6.0 // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/distribution/reference v0.5.0 // indirect
github.com/docker/docker v24.0.7+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.0 // indirect
github.com/docker/docker v24.0.2+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/go-jose/go-jose/v3 v3.0.1 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-jose/go-jose/v3 v3.0.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.21.4 // indirect
github.com/go-openapi/errors v0.20.4 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/errors v0.20.3 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/loads v0.21.2 // indirect
github.com/go-openapi/runtime v0.26.0 // indirect
github.com/go-openapi/spec v0.20.9 // indirect
@@ -59,33 +58,32 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/go-containerregistry v0.16.1 // indirect
github.com/google/go-containerregistry v0.15.2 // indirect
github.com/google/go-intervals v0.0.2 // indirect
github.com/google/uuid v1.3.1 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.5 // indirect
github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.3 // indirect
github.com/klauspost/compress v1.16.6 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
github.com/mattn/go-sqlite3 v1.14.18 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/sys/mountinfo v0.7.1 // indirect
github.com/moby/sys/mountinfo v0.6.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/opencontainers/runc v1.1.10 // indirect
github.com/opencontainers/runtime-spec v1.1.0 // indirect
github.com/opencontainers/runc v1.1.7 // indirect
github.com/opencontainers/runtime-spec v1.1.0-rc.3 // indirect
github.com/opencontainers/selinux v1.11.0 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
@@ -94,40 +92,41 @@ require (
github.com/proglottis/gpgme v0.1.3 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/russross/blackfriday v2.0.0+incompatible // indirect
github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/sigstore/fulcio v1.4.3 // indirect
github.com/sigstore/rekor v1.2.2 // indirect
github.com/sigstore/sigstore v1.7.5 // indirect
github.com/sigstore/fulcio v1.3.1 // indirect
github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 // indirect
github.com/sigstore/sigstore v1.7.1 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
github.com/sylabs/sif/v2 v2.15.0 // indirect
github.com/sylabs/sif/v2 v2.11.5 // indirect
github.com/tchap/go-patricia/v2 v2.3.1 // indirect
github.com/theupdateframework/go-tuf v0.5.2 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/ulikunitz/xz v0.5.11 // indirect
github.com/vbatts/tar-split v0.11.5 // indirect
github.com/vbauerster/mpb/v8 v8.6.2 // indirect
github.com/vbatts/tar-split v0.11.3 // indirect
github.com/vbauerster/mpb/v8 v8.4.0 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
go.etcd.io/bbolt v1.3.7 // indirect
go.mongodb.org/mongo-driver v1.11.3 // indirect
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel v1.19.0 // indirect
go.opentelemetry.io/otel/metric v1.19.0 // indirect
go.opentelemetry.io/otel/trace v1.19.0 // indirect
golang.org/x/crypto v0.15.0 // indirect
golang.org/x/mod v0.13.0 // indirect
golang.org/x/net v0.18.0 // indirect
golang.org/x/oauth2 v0.14.0 // indirect
golang.org/x/sync v0.5.0 // indirect
golang.org/x/sys v0.14.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/tools v0.14.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect
google.golang.org/grpc v1.58.3 // indirect
google.golang.org/protobuf v1.31.0 // indirect
go.opentelemetry.io/otel v1.15.0 // indirect
go.opentelemetry.io/otel/trace v1.15.0 // indirect
golang.org/x/crypto v0.10.0 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.11.0 // indirect
golang.org/x/oauth2 v0.9.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.9.0 // indirect
golang.org/x/text v0.10.0 // indirect
golang.org/x/tools v0.9.3 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
google.golang.org/grpc v1.55.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)

go.sum (270 changed lines)
View File

@@ -4,12 +4,13 @@ dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/Microsoft/hcsshim v0.12.0-rc.1 h1:Hy+xzYujv7urO5wrgcG58SPMOXNLrj4WCJbySs2XX/A=
github.com/Microsoft/hcsshim v0.12.0-rc.1/go.mod h1:Y1a1S0QlYp1mBpyvGiuEdOfZqnao+0uX5AWHXQ5NhZU=
github.com/Microsoft/hcsshim v0.10.0-rc.8 h1:YSZVvlIIDD1UxQpJp0h+dnpLUw+TrY0cx8obKsp3bek=
github.com/Microsoft/hcsshim v0.10.0-rc.8/go.mod h1:OEthFdQv/AD2RAdzR6Mm1N1KPCztGKDurW1Z8b8VGMM=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
@@ -24,45 +25,41 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
github.com/containerd/containerd v1.7.9 h1:KOhK01szQbM80YfW1H6RZKh85PHGqY/9OcEZ35Je8sc=
github.com/containerd/containerd v1.7.9/go.mod h1:0/W44LWEYfSHoxBtsHIiNU/duEkgpMokemafHVCpq9Y=
github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
github.com/containers/common v0.57.0 h1:5O/+6QUBafKK0/zeok9y1rLPukfWgdE0sT4nuzmyAqk=
github.com/containers/common v0.57.0/go.mod h1:t/Z+/sFrapvFMEJe3YnecN49/Tae2wYEQShbEN6SRaU=
github.com/containers/image/v5 v5.29.0 h1:9+nhS/ZM7c4Kuzu5tJ0NMpxrgoryOJ2HAYTgG8Ny7j4=
github.com/containers/image/v5 v5.29.0/go.mod h1:kQ7qcDsps424ZAz24thD+x7+dJw1vgur3A9tTDsj97E=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
github.com/containerd/containerd v1.7.2 h1:UF2gdONnxO8I6byZXDi5sXWiWvlW3D/sci7dTQimEJo=
github.com/containerd/containerd v1.7.2/go.mod h1:afcz74+K10M/+cjGHIVQrCt3RAQhUSCAjJ9iMYhhkuI=
github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
github.com/containers/common v0.55.1 h1:sOlcIxEYXoR3OSHufew7CuSeOWr7a2jHGYw3r+xKA1k=
github.com/containers/common v0.55.1/go.mod h1:ZKPllYOZ2xj2rgWRdnHHVvWg6ru4BT28En8mO8DMMPk=
github.com/containers/image/v5 v5.26.1 h1:8y3xq8GO/6y8FR+nAedHPsAFiAtOrab9qHTBpbqaX8g=
github.com/containers/image/v5 v5.26.1/go.mod h1:IwlOGzTkGnmfirXxt0hZeJlzv1zVukE03WZQ203Z9GA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOjIdEM=
github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys=
github.com/containers/storage v1.51.0 h1:AowbcpiWXzAjHosKz7MKvPEqpyX+ryZA/ZurytRrFNA=
github.com/containers/storage v1.51.0/go.mod h1:ybl8a3j1PPtpyaEi/5A6TOFs+5TrEyObeKJzVtkUlfc=
github.com/coreos/go-oidc/v3 v3.7.0 h1:FTdj0uexT4diYIPlF4yoFVI5MRO1r5+SEcIpEw9vC0o=
github.com/coreos/go-oidc/v3 v3.7.0/go.mod h1:yQzSCqBnK3e6Fs5l+f5i0F8Kwf0zpH9bPEsbY00KanM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/containers/ocicrypt v1.1.7 h1:thhNr4fu2ltyGz8aMx8u48Ae0Pnbip3ePP9/mzkZ/3U=
github.com/containers/ocicrypt v1.1.7/go.mod h1:7CAhjcj2H8AYp5YvEie7oVSK2AhBY8NscCYRawuDNtw=
github.com/containers/storage v1.48.0 h1:wiPs8J2xiFoOEAhxHDRtP6A90Jzj57VqzLRXOqeizns=
github.com/containers/storage v1.48.0/go.mod h1:pRp3lkRo2qodb/ltpnudoXggrviRmaCmU5a5GhTBae0=
github.com/coreos/go-oidc/v3 v3.6.0 h1:AKVxfYw1Gmkn/w96z0DbT/B/xFnzTd3MkZvWLjF4n/o=
github.com/coreos/go-oidc/v3 v3.6.0/go.mod h1:ZpHUsHBucTUj6WOkrP4E20UPynbLZzhTQ1XKCXkxyPc=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc=
github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 h1:8Pq5UNTC+/UfvcOPKQGZoKCkeF+ZaKa4wJ9OS2gsQQM=
github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg=
github.com/docker/docker v24.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY=
@@ -75,11 +72,11 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg=
github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo=
github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
@@ -88,16 +85,14 @@ github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9Qy
github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M=
github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc=
github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro=
github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
@@ -115,12 +110,11 @@ github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KA
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
github.com/go-rod/rod v0.114.4 h1:FpkNFukjCuZLwnoLs+S9aCL95o/EMec6M+41UmvQay8=
github.com/go-rod/rod v0.113.3 h1:oLiKZW721CCMwA5g7977cWfcAKQ+FuosP47Zf1QiDrA=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
@@ -156,6 +150,7 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -165,7 +160,6 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -177,9 +171,9 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ=
github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-containerregistry v0.15.2 h1:MMkSh+tjSdnmJZO7ljvEqV1DjfekB6VUEAZgy3a+TQE=
github.com/google/go-containerregistry v0.15.2/go.mod h1:wWK+LnOv4jXMM23IT/F1wdYftGWGr47Is8CG+pmHK1Q=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -187,8 +181,8 @@ github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBx
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -200,8 +194,8 @@ github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxC
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc=
github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
@@ -219,15 +213,14 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk=
github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -242,12 +235,10 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI=
github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
@@ -257,8 +248,8 @@ github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
@@ -270,25 +261,25 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo/v2 v2.13.1 h1:LNGfMbR2OVGBfXjvRZIZ2YCTQdGKtPLvuI1rMCCj3OU=
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=
github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0=
github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
github.com/opencontainers/image-tools v1.0.0-rc3 h1:ZR837lBIxq6mmwEqfYrbLMuf75eBSHhccVHy6lsBeM4=
github.com/opencontainers/image-tools v1.0.0-rc3/go.mod h1:A9btVpZLzttF4iFaKNychhPyrhfOjJ1OF5KrA8GcLj4=
github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQBizE40=
github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M=
github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runc v1.1.7 h1:y2EZDS8sNng4Ksf0GUYNhKbTShZJPJg1FiXJNH/uoCk=
github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50=
github.com/opencontainers/runtime-spec v1.1.0-rc.3 h1:l04uafi6kxByhbxev7OWiuUv0LZxEsYUfDWZ6bztAuU=
github.com/opencontainers/runtime-spec v1.1.0-rc.3/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -298,44 +289,44 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0=
github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/russross/blackfriday v2.0.0+incompatible h1:cBXrhZNUf9C+La9/YpS+UHpUT8YD6Td9ZMSU9APFcsk=
github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg=
github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI=
github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ=
github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og=
github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY=
github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg=
github.com/sigstore/sigstore v1.7.5 h1:ij55dBhLwjICmLTBJZm7SqoQLdsu/oowDanACcJNs48=
github.com/sigstore/sigstore v1.7.5/go.mod h1:9OCmYWhzuq/G4e1cy9m297tuMRJ1LExyrXY3ZC3Zt/s=
github.com/sigstore/fulcio v1.3.1 h1:0ntW9VbQbt2JytoSs8BOGB84A65eeyvGSavWteYp29Y=
github.com/sigstore/fulcio v1.3.1/go.mod h1:/XfqazOec45ulJZpyL9sq+OsVQ8g2UOVoNVi7abFgqU=
github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 h1:x/WnxasgR40qGY67IHwioakXLuhDxJ10vF8/INuOTiI=
github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12/go.mod h1:8c+a8Yo7r8gKuYbIaz+c3oOdw9iMXx+tMdOg2+b+2jQ=
github.com/sigstore/sigstore v1.7.1 h1:fCATemikcBK0cG4+NcM940MfoIgmioY1vC6E66hXxks=
github.com/sigstore/sigstore v1.7.1/go.mod h1:0PmMzfJP2Y9+lugD0wer4e7TihR5tM7NcIs3bQNk5xg=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -354,12 +345,14 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/sylabs/sif/v2 v2.15.0 h1:Nv0tzksFnoQiQ2eUwpAis9nVqEu4c3RcNSxX8P3Cecw=
github.com/sylabs/sif/v2 v2.15.0/go.mod h1:X1H7eaPz6BAxA84POMESXoXfTqgAnLQkujyF/CQFWTc=
github.com/sylabs/sif/v2 v2.11.5 h1:7ssPH3epSonsTrzbS1YxeJ9KuqAN7ISlSM61a7j/mQM=
github.com/sylabs/sif/v2 v2.11.5/go.mod h1:GBoZs9LU3e4yJH1dcZ3Akf/jsqYgy5SeguJQC+zd75Y=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/theupdateframework/go-tuf v0.5.2 h1:habfDzTmpbzBLIFGWa2ZpVhYvFBoK0C1onC3a4zuPRA=
github.com/theupdateframework/go-tuf v0.5.2/go.mod h1:SyMV5kg5n4uEclsyxXJZI2UxPFJNDc4Y+r7wv+MlvTA=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
@@ -367,10 +360,11 @@ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHT
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
github.com/vbauerster/mpb/v8 v8.6.2 h1:9EhnJGQRtvgDVCychJgR96EDCOqgg2NsMuk5JUcX4DA=
github.com/vbauerster/mpb/v8 v8.6.2/go.mod h1:oVJ7T+dib99kZ/VBjoBaC8aPXiSAihnzuKmotuihyFo=
github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck=
github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
github.com/vbauerster/mpb/v8 v8.4.0 h1:Jq2iNA7T6SydpMVOwaT+2OBWlXS9Th8KEvBqeu5eeTo=
github.com/vbauerster/mpb/v8 v8.4.0/go.mod h1:vjp3hSTuCtR+x98/+2vW3eZ8XzxvGoP8CPseHMhiPyc=
github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
@@ -393,23 +387,23 @@ github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=
github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y=
go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak=
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
go.opentelemetry.io/otel v1.15.0 h1:NIl24d4eiLJPM0vKn4HjLYM+UZf6gSfi9Z+NmCxkWbk=
go.opentelemetry.io/otel v1.15.0/go.mod h1:qfwLEbWhLPk5gyWrne4XnF0lC8wtywbuJbgfAE3zbek=
go.opentelemetry.io/otel/sdk v1.15.0 h1:jZTCkRRd08nxD6w7rIaZeDNGZGGQstH3SfLQ3ZsKICk=
go.opentelemetry.io/otel/trace v1.15.0 h1:5Fwje4O2ooOxkfyqI/kJwxWotggDLix4BSAvpE1wlpo=
go.opentelemetry.io/otel/trace v1.15.0/go.mod h1:CUsmE2Ht1CRkvE8OsMESvraoZrrcgD1J2W8GV1ev0Y4=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -418,26 +412,26 @@ golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
@@ -445,12 +439,11 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU=
golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0=
golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM=
golang.org/x/oauth2 v0.9.0 h1:BPpt2kU7oMRq3kCHAA1tbSEshXRw1LpG2ztgDwrzuAs=
golang.org/x/oauth2 v0.9.0/go.mod h1:qYgFZaFiu6Wg24azG8bdV52QJXJGbZzIIsRCdVKzbLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -459,9 +452,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -470,6 +462,7 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -477,21 +470,20 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28=
golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -505,29 +497,28 @@ golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA=
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag=
google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -539,17 +530,19 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/go-jose/go-jose.v2 v2.6.1 h1:qEzJlIDmG9q5VO0M/o8tGS65QMHMS1w01TQJB1VPJ4U=
gopkg.in/go-jose/go-jose.v2 v2.6.1/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
@@ -558,9 +551,10 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=


@@ -123,7 +123,7 @@ podman run docker://quay.io/skopeo/stable:latest copy --help
Otherwise, read on for building and installing it from source:
To build the `skopeo` binary you need at least Go 1.19.
To build the `skopeo` binary you need at least Go 1.12.
There are two ways to build skopeo: in a container, or locally without a
container. Choose the one which better matches your needs and environment.
@@ -174,16 +174,6 @@ document generation can be skipped by passing `DISABLE_DOCS=1`:
DISABLE_DOCS=1 make
```
### Cross-compilation
For cross-building skopeo, use the command `make bin/skopeo.OS.ARCH`, where OS represents
the target operating system and ARCH stands for the desired architecture. For instance,
to build skopeo for RISC-V 64-bit Linux, execute:
```bash
make bin/skopeo.linux.riscv64
```
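As an editor's aside (not part of the diff above): the `bin/skopeo.OS.ARCH` naming convention described here should apply to any platform the Go toolchain can target; the target below is an illustrative assumption, not a statement of official support.

```bash
# Illustration only; linux/arm64 is assumed to be a valid cross target.
make bin/skopeo.linux.arm64
# Inspect the result to confirm the architecture of the produced binary.
file bin/skopeo.linux.arm64
```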
### Building documentation
To build the manual you will need go-md2man.


@@ -239,7 +239,7 @@ func runTestGetManifestAndConfig(p *proxy, img string) error {
if !ok {
return fmt.Errorf("OpenImage return value is %T", v)
}
imgid := uint64(imgidv)
imgid := uint32(imgidv)
if imgid == 0 {
return fmt.Errorf("got zero from expected image")
}
@@ -254,7 +254,7 @@ func runTestGetManifestAndConfig(p *proxy, img string) error {
if !ok {
return fmt.Errorf("OpenImageOptional return value is %T", v)
}
imgid2 := uint64(imgidv)
imgid2 := uint32(imgidv)
if imgid2 == 0 {
return fmt.Errorf("got zero from expected image")
}
@@ -325,7 +325,7 @@ func runTestOpenImageOptionalNotFound(p *proxy, img string) error {
if !ok {
return fmt.Errorf("OpenImageOptional return value is %T", v)
}
imgid := uint64(imgidv)
imgid := uint32(imgidv)
if imgid != 0 {
return fmt.Errorf("Unexpected optional image id %v", imgid)
}


@@ -7,12 +7,9 @@
%global debug_package %{nil}
%endif
# RHEL's default %%gobuild macro doesn't account for the BUILDTAGS variable, so we
# set it separately here and do not depend on RHEL's go-[s]rpm-macros package
# until that's fixed.
# c9s bz: https://bugzilla.redhat.com/show_bug.cgi?id=2227328
# c8s bz: https://bugzilla.redhat.com/show_bug.cgi?id=2227331
%if %{defined rhel}
# RHEL 8's default %%gobuild macro doesn't account for the BUILDTAGS variable, so we
# set it separately here and do not depend on RHEL 8's go-srpm-macros package.
%if %{defined rhel} && 0%{?rhel} == 8
%define gobuild(o:) go build -buildmode pie -compiler gc -tags="rpm_crashtraceback libtrust_openssl ${BUILDTAGS:-}" -ldflags "-linkmode=external -compressdwarf=false ${LDFLAGS:-} -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n') -extldflags '%__global_ldflags'" -a -v -x %{?**};
%endif
@@ -44,8 +41,7 @@ Epoch: %{conditional_epoch}
# copr and koji builds.
# If you're reading this on dist-git, the version is automatically filled in by Packit.
Version: 0
# The `AND` needs to be uppercase in the License for SPDX compatibility
License: Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND ISC AND MIT AND MPL-2.0
License: Apache-2.0 and BSD-2-Clause and BSD-3-Clause and ISC and MIT and MPL-2.0
Release: %autorelease
%if %{defined golang_arches_future}
ExclusiveArch: %{golang_arches_future}
@@ -73,6 +69,8 @@ BuildRequires: glib2-devel
BuildRequires: make
BuildRequires: shadow-utils-subid-devel
Requires: containers-common >= 4:1-21
# DO NOT DELETE BELOW LINE - used for updating downstream goimports
# vendored libraries
%description
Command line utility to inspect images and repositories directly on Docker


@@ -0,0 +1,17 @@
#!/usr/bin/env bash
# This script will update the goimports in the rpm spec for downstream fedora
# packaging, via the `propose-downstream` packit action.
# The goimports don't need to be present upstream.
set -eo pipefail
PACKAGE=skopeo
# script is run from git root directory
SPEC_FILE=rpm/$PACKAGE.spec
sed -i '/Provides: bundled(golang.*/d' $SPEC_FILE
GO_IMPORTS=$(golist --imported --package-path github.com/containers/$PACKAGE --skip-self | sort -u | xargs "-I{}" echo "Provides: bundled(golang({}))")
awk -v r="$GO_IMPORTS" '/^# vendored libraries/ {print; print r; next} 1' $SPEC_FILE > temp && mv temp $SPEC_FILE
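For context (an editor's illustration, not part of the script): the `GO_IMPORTS` pipeline above turns each vendored import path reported by `golist` into a `Provides: bundled(golang(...))` line, so after the `propose-downstream` action runs, the spec should contain entries of roughly the shape sketched below.

```bash
# Hypothetical inspection of the injected block; the module paths shown in the
# comments are examples taken from the go.sum hunk earlier in this diff, not an
# authoritative list of what golist would emit.
grep '^Provides: bundled(golang(' rpm/skopeo.spec
#   Provides: bundled(golang(github.com/spf13/cobra))
#   Provides: bundled(golang(github.com/spf13/pflag))
```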


@@ -0,0 +1,57 @@
/*
mkwinsyscall generates windows system call bodies
It parses all files specified on command line containing function
prototypes (like syscall_windows.go) and prints system call bodies
to standard output.
The prototypes are marked by lines beginning with "//sys" and read
like func declarations if //sys is replaced by func, but:
- The parameter lists must give a name for each argument. This
includes return parameters.
- The parameter lists must give a type for each argument:
the (x, y, z int) shorthand is not allowed.
- If the return parameter is an error number, it must be named err.
- If go func name needs to be different from its winapi dll name,
the winapi name could be specified at the end, after "=" sign, like
//sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA
- Each function that returns err needs to supply a condition, that
return value of winapi will be tested against to detect failure.
This would set err to windows "last-error", otherwise it will be nil.
The value can be provided at end of //sys declaration, like
//sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA
and is [failretval==0] by default.
- If the function name ends in a "?", then the function not existing is non-
fatal, and an error will be returned instead of panicking.
Usage:
mkwinsyscall [flags] [path ...]
Flags
-output string
Output file name (standard output if omitted).
-sort
Sort DLL and function declarations (default true).
Intended to help transition from older versions of mkwinsyscall by making diffs
easier to read and understand.
-systemdll
Whether all DLLs should be loaded from the Windows system directory (default true).
-trace
Generate print statement after every syscall.
-utf16
Encode string arguments as UTF-16 for syscalls not ending in 'A' or 'W' (default true).
-winio
Import this package ("github.com/Microsoft/go-winio").
*/
package main
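To make the usage text above concrete, here is a hedged sketch of invoking the generator on a file of `//sys` prototypes; the file names are assumptions, while the flags are the ones documented above.

```bash
# Generate syscall bodies for the prototypes in syscall_windows.go and write
# them to zsyscall_windows.go instead of standard output.
mkwinsyscall -output zsyscall_windows.go syscall_windows.go

# The same run with -trace adds a print statement after every generated
# syscall, which can help when debugging a misbehaving prototype.
mkwinsyscall -trace -output zsyscall_windows.go syscall_windows.go
```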

File diff suppressed because it is too large.


@@ -37,10 +37,6 @@ rootfs-conv/*
deps/*
out/*
# protobuf files
# only files at root of the repo, otherwise this will cause issues with vendoring
/protobuf/*
# test results
test/results


@@ -21,31 +21,17 @@ linters:
# - unused
- gofmt # whether code was gofmt-ed
- govet # enabled by default, but just to be sure
- nolintlint # ill-formed or insufficient nolint directives
- stylecheck # golint replacement
- thelper # test helpers without t.Helper()
linters-settings:
govet:
enable-all: true
disable:
# struct order is often for Win32 compat
# also, ignore pointer bytes/GC issues for now until performance becomes an issue
- fieldalignment
check-shadowing: true
stylecheck:
# https://staticcheck.io/docs/checks
checks: ["all"]
issues:
exclude-rules:
# err is very often shadowed in nested scopes
- linters:
- govet
text: '^shadow: declaration of "err" shadows declaration'
# path is relative to module root, which is ./test/
- path: cri-containerd
linters:
@@ -149,19 +135,3 @@ issues:
linters:
- stylecheck
Text: "ST1003:"
# v0 APIs are deprecated, but still retained for backwards compatability
- path: cmd\\ncproxy\\
linters:
- staticcheck
text: "^SA1019: .*(ncproxygrpc|nodenetsvc)[/]?v0"
- path: internal\\tools\\networkagent
linters:
- staticcheck
text: "^SA1019: .*nodenetsvc[/]?v0"
- path: internal\\vhdx\\info
linters:
- stylecheck
Text: "ST1003:"


@@ -1,25 +1,48 @@
version = "2"
generators = ["go", "go-grpc"]
version = "1"
generator = "gogoctrd"
plugins = ["grpc", "fieldpath"]
# Control protoc include paths.
# Control protoc include paths. Below are usually some good defaults, but feel
# free to try it without them if it works for your project.
[includes]
# Include paths that will be added before all others. Typically, you want to
# treat the root of the project as an include, but this may not be necessary.
before = ["./protobuf"]
# defaults are "/usr/local/include" and "/usr/include", which don't exist on Windows.
# override defaults to supress errors about non-existant directories.
after = []
# Paths that should be treated as include roots in relation to the vendor
# directory. These will be calculated with the vendor directory nearest the
# target package.
packages = ["github.com/gogo/protobuf"]
# This section maps protobuf imports to Go packages.
# This section maps protobuf imports to Go packages. These will become
# `-M` directives in the call to the go protobuf generator.
[packages]
# github.com/containerd/cgroups protofiles still list their go path as "github.com/containerd/cgroups/cgroup1/stats"
"github.com/containerd/cgroups/v3/cgroup1/stats/metrics.proto" = "github.com/containerd/cgroups/v3/cgroup1/stats"
"gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto"
"google/protobuf/any.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/empty.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/struct.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
"google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/duration.proto" = "github.com/gogo/protobuf/types"
"github/containerd/cgroups/stats/v1/metrics.proto" = "github.com/containerd/cgroups/stats/v1"
[[overrides]]
prefixes = [
"github.com/Microsoft/hcsshim/internal/shimdiag",
"github.com/Microsoft/hcsshim/internal/extendedtask",
"github.com/Microsoft/hcsshim/internal/computeagent",
"github.com/Microsoft/hcsshim/internal/ncproxyttrpc",
"github.com/Microsoft/hcsshim/internal/vmservice",
]
generators = ["go", "go-ttrpc"]
prefixes = ["github.com/Microsoft/hcsshim/internal/shimdiag"]
plugins = ["ttrpc"]
[[overrides]]
prefixes = ["github.com/Microsoft/hcsshim/internal/extendedtask"]
plugins = ["ttrpc"]
[[overrides]]
prefixes = ["github.com/Microsoft/hcsshim/internal/computeagent"]
plugins = ["ttrpc"]
[[overrides]]
prefixes = ["github.com/Microsoft/hcsshim/internal/ncproxyttrpc"]
plugins = ["ttrpc"]
[[overrides]]
prefixes = ["github.com/Microsoft/hcsshim/internal/vmservice"]
plugins = ["ttrpc"]


@@ -16,9 +16,7 @@ import (
"github.com/Microsoft/hcsshim/internal/security"
)
const (
defaultVHDXBlockSizeInMB = 1
)
const defaultVHDXBlockSizeInMB = 1
// SetupContainerBaseLayer is a helper to setup a containers scratch. It
// will create and format the vhdx's inside and the size is configurable with the sizeInGB


@@ -11,7 +11,7 @@ import (
//sys hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) = computestorage.HcsImportLayer?
//sys hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) = computestorage.HcsExportLayer?
//sys hcsDestroyLayer(layerPath string) (hr error) = computestorage.HcsDestroyLayer?
//sys hcsDestroyLayer(layerPath string) (hr error) = computestorage.HcsDestoryLayer?
//sys hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) = computestorage.HcsSetupBaseOSLayer?
//sys hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) = computestorage.HcsInitializeWritableLayer?
//sys hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) = computestorage.HcsAttachLayerStorageFilter?


@@ -43,7 +43,7 @@ var (
modcomputestorage = windows.NewLazySystemDLL("computestorage.dll")
procHcsAttachLayerStorageFilter = modcomputestorage.NewProc("HcsAttachLayerStorageFilter")
procHcsDestroyLayer = modcomputestorage.NewProc("HcsDestroyLayer")
procHcsDestoryLayer = modcomputestorage.NewProc("HcsDestoryLayer")
procHcsDetachLayerStorageFilter = modcomputestorage.NewProc("HcsDetachLayerStorageFilter")
procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer")
procHcsFormatWritableLayerVhd = modcomputestorage.NewProc("HcsFormatWritableLayerVhd")
@@ -93,11 +93,11 @@ func hcsDestroyLayer(layerPath string) (hr error) {
}
func _hcsDestroyLayer(layerPath *uint16) (hr error) {
hr = procHcsDestroyLayer.Find()
hr = procHcsDestoryLayer.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsDestroyLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0)
r0, _, _ := syscall.Syscall(procHcsDestoryLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff


@@ -12,16 +12,14 @@ import (
"syscall"
"time"
"go.opencensus.io/trace"
"github.com/Microsoft/hcsshim/internal/cow"
hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/internal/protocol/guestrequest"
"github.com/Microsoft/hcsshim/internal/vmcompute"
"go.opencensus.io/trace"
)
// ContainerError is an error encountered in HCS
type Process struct {
handleLock sync.RWMutex
handle vmcompute.HcsProcess
@@ -52,6 +50,35 @@ func newProcess(process vmcompute.HcsProcess, processID int, computeSystem *Syst
}
}
type processModifyRequest struct {
Operation string
ConsoleSize *consoleSize `json:",omitempty"`
CloseHandle *closeHandle `json:",omitempty"`
}
type consoleSize struct {
Height uint16
Width uint16
}
type closeHandle struct {
Handle string
}
type processStatus struct {
ProcessID uint32
Exited bool
ExitCode uint32
LastWaitResult int32
}
const stdIn string = "StdIn"
const (
modifyConsoleSize string = "ConsoleSize"
modifyCloseHandle string = "CloseHandle"
)
// Pid returns the process ID of the process within the container.
func (process *Process) Pid() int {
return process.processID
@@ -233,14 +260,14 @@ func (process *Process) waitBackground() {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
// Make sure we didn't race with Close() here
// Make sure we didnt race with Close() here
if process.handle != 0 {
propertiesJSON, resultJSON, err = vmcompute.HcsGetProcessProperties(ctx, process.handle)
events := processHcsResult(ctx, resultJSON)
if err != nil {
err = makeProcessError(process, operation, err, events)
} else {
properties := &hcsschema.ProcessStatus{}
properties := &processStatus{}
err = json.Unmarshal([]byte(propertiesJSON), properties)
if err != nil {
err = makeProcessError(process, operation, err, nil)
@@ -291,9 +318,10 @@ func (process *Process) ResizeConsole(ctx context.Context, width, height uint16)
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
modifyRequest := hcsschema.ProcessModifyRequest{
Operation: guestrequest.ModifyProcessConsoleSize,
ConsoleSize: &hcsschema.ConsoleSize{
modifyRequest := processModifyRequest{
Operation: modifyConsoleSize,
ConsoleSize: &consoleSize{
Height: height,
Width: width,
},
@@ -393,12 +421,18 @@ func (process *Process) CloseStdin(ctx context.Context) (err error) {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
process.stdioLock.Lock()
defer process.stdioLock.Unlock()
if process.stdin == nil {
return nil
}
//HcsModifyProcess request to close stdin will fail if the process has already exited
if !process.stopped() {
modifyRequest := hcsschema.ProcessModifyRequest{
Operation: guestrequest.CloseProcessHandle,
CloseHandle: &hcsschema.CloseHandle{
Handle: guestrequest.STDInHandle,
modifyRequest := processModifyRequest{
Operation: modifyCloseHandle,
CloseHandle: &closeHandle{
Handle: stdIn,
},
}
@@ -414,12 +448,8 @@ func (process *Process) CloseStdin(ctx context.Context) (err error) {
}
}
process.stdioLock.Lock()
defer process.stdioLock.Unlock()
if process.stdin != nil {
process.stdin.Close()
process.stdin = nil
}
process.stdin.Close()
process.stdin = nil
return nil
}


@@ -1,25 +0,0 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.5
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
const (
CimMountFlagNone uint32 = 0x0
CimMountFlagChildOnly uint32 = 0x1
CimMountFlagEnableDax uint32 = 0x2
CimMountFlagCacheFiles uint32 = 0x4
CimMountFlagCacheRegions uint32 = 0x8
)
type CimMount struct {
ImagePath string `json:"ImagePath,omitempty"`
FileSystemName string `json:"FileSystemName,omitempty"`
VolumeGuid string `json:"VolumeGuid,omitempty"`
MountFlags uint32 `json:"MountFlags,omitempty"`
}


@@ -9,8 +9,6 @@
package hcsschema
import "github.com/Microsoft/hcsshim/internal/protocol/guestrequest"
type CloseHandle struct {
Handle guestrequest.STDIOHandle `json:"Handle,omitempty"` // NOTE: Swagger generated as string. Locally updated.
Handle string `json:"Handle,omitempty"`
}


@@ -9,11 +9,8 @@
package hcsschema
// NOTE: Swagger generated fields as int32. Locally updated to uint16 to match documentation.
// https://learn.microsoft.com/en-us/virtualization/api/hcs/schemareference#ConsoleSize
type ConsoleSize struct {
Height uint16 `json:"Height,omitempty"`
Height int32 `json:"Height,omitempty"`
Width uint16 `json:"Width,omitempty"`
Width int32 `json:"Width,omitempty"`
}


@@ -17,5 +17,5 @@ type IsolationSettings struct {
DebugPort int64 `json:"DebugPort,omitempty"`
// Optional data passed by host on isolated virtual machine start
LaunchData string `json:"LaunchData,omitempty"`
HclEnabled *bool `json:"HclEnabled,omitempty"`
HclEnabled bool `json:"HclEnabled,omitempty"`
}


@@ -9,11 +9,9 @@
package hcsschema
import "github.com/Microsoft/hcsshim/internal/protocol/guestrequest"
// Passed to HcsRpc_ModifyProcess
// Passed to HcsRpc_ModifyProcess
type ProcessModifyRequest struct {
Operation guestrequest.ProcessModifyOperation `json:"Operation,omitempty"` // NOTE: Swagger generated as string. Locally updated.
Operation string `json:"Operation,omitempty"`
ConsoleSize *ConsoleSize `json:"ConsoleSize,omitempty"`


@@ -9,16 +9,13 @@
package hcsschema
// NOTE: Swagger generated fields as int32. Locally updated to uint16 to match documentation.
// https://learn.microsoft.com/en-us/virtualization/api/hcs/schemareference#ConsoleSize
// Status of a process running in a container
// Status of a process running in a container
type ProcessStatus struct {
ProcessId uint32 `json:"ProcessId,omitempty"` // NOTE: Swagger generated as int32. Locally updated to match documentation.
ProcessId int32 `json:"ProcessId,omitempty"`
Exited bool `json:"Exited,omitempty"`
ExitCode uint32 `json:"ExitCode,omitempty"` // NOTE: Swagger generated as int32. Locally updated to match documentation.
ExitCode int32 `json:"ExitCode,omitempty"`
LastWaitResult int32 `json:"LastWaitResult,omitempty"`
}


@@ -10,7 +10,7 @@
package hcsschema
import (
v1 "github.com/containerd/cgroups/v3/cgroup1/stats"
v1 "github.com/containerd/cgroups/stats/v1"
)
type Properties struct {


@@ -304,22 +304,11 @@ func (computeSystem *System) WaitError() error {
return computeSystem.waitError
}
// Wait synchronously waits for the compute system to shutdown or terminate.
// If the compute system has already exited returns the previous error (if any).
// Wait synchronously waits for the compute system to shutdown or terminate. If
// the compute system has already exited returns the previous error (if any).
func (computeSystem *System) Wait() error {
return computeSystem.WaitCtx(context.Background())
}
// WaitCtx synchronously waits for the compute system to shutdown or terminate, or the context to be cancelled.
//
// See [System.Wait] for more information.
func (computeSystem *System) WaitCtx(ctx context.Context) error {
select {
case <-computeSystem.WaitChannel():
return computeSystem.WaitError()
case <-ctx.Done():
return ctx.Err()
}
<-computeSystem.WaitChannel()
return computeSystem.WaitError()
}
// stopped returns true if the compute system stopped.
@@ -746,17 +735,9 @@ func (computeSystem *System) OpenProcess(ctx context.Context, pid int) (*Process
}
// Close cleans up any state associated with the compute system but does not terminate or wait for it.
func (computeSystem *System) Close() error {
return computeSystem.CloseCtx(context.Background())
}
// CloseCtx is similar to [System.Close], but accepts a context.
//
// The context is used for all operations, including waits, so timeouts/cancellations may prevent
// proper system cleanup.
func (computeSystem *System) CloseCtx(ctx context.Context) (err error) {
func (computeSystem *System) Close() (err error) {
operation := "hcs::System::Close"
ctx, span := oc.StartSpan(ctx, operation)
ctx, span := oc.StartSpan(context.Background(), operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))


@@ -167,7 +167,7 @@ func Create(ctx context.Context, options *Options) (_ *JobObject, err error) {
//
// Returns a JobObject structure and an error if there is one.
func Open(ctx context.Context, options *Options) (_ *JobObject, err error) {
if options == nil || options.Name == "" {
if options == nil || (options != nil && options.Name == "") {
return nil, errors.New("no job object name specified to open")
}


@@ -9,16 +9,10 @@ import (
"reflect"
"time"
"github.com/sirupsen/logrus"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
"github.com/containerd/containerd/log"
)
// TimeFormat is [time.RFC3339Nano] with nanoseconds padded using
// zeros to ensure the formatted time is always the same number of
// characters.
// Based on RFC3339NanoFixed from github.com/containerd/log
const TimeFormat = "2006-01-02T15:04:05.000000000Z07:00"
const TimeFormat = log.RFC3339NanoFixed
func FormatTime(t time.Time) string {
return t.Format(TimeFormat)
@@ -65,48 +59,25 @@ func formatAddr(a net.Addr) string {
func Format(ctx context.Context, v interface{}) string {
b, err := encode(v)
if err != nil {
// logging errors aren't really warning worthy, and can potentially spam a lot of logs out
G(ctx).WithFields(logrus.Fields{
logrus.ErrorKey: err,
"type": fmt.Sprintf("%T", v),
}).Debug("could not format value")
G(ctx).WithError(err).Warning("could not format value")
return ""
}
return string(b)
}
func encode(v interface{}) (_ []byte, err error) {
if m, ok := v.(proto.Message); ok {
// use canonical JSON encoding for protobufs (instead of [encoding/json])
// https://protobuf.dev/programming-guides/proto3/#json
var b []byte
b, err = protojson.MarshalOptions{
AllowPartial: true,
// protobuf defaults to camel case for JSON encoding; use proto field name instead (snake case)
UseProtoNames: true,
}.Marshal(m)
if err == nil {
// the protojson marshaller tries to unmarshal anypb.Any fields, which can
// fail for types encoded with "github.com/containerd/typeurl/v2"
// we can try creating a dedicated protoregistry.MessageTypeResolver that uses typeurl, but, its
// more robust to fall back on json marshalling for errors in general
return b, nil
}
func encode(v interface{}) ([]byte, error) {
return encodeBuffer(&bytes.Buffer{}, v)
}
}
buf := &bytes.Buffer{}
func encodeBuffer(buf *bytes.Buffer, v interface{}) ([]byte, error) {
enc := json.NewEncoder(buf)
enc.SetEscapeHTML(false)
enc.SetIndent("", "")
if jErr := enc.Encode(v); jErr != nil {
if err != nil {
// TODO (go1.20): use multierror via fmt.Errorf("...: %w; ...: %w", ...)
return nil, fmt.Errorf("protojson encoding: %v; json encoding: %w", err, jErr)
}
return nil, fmt.Errorf("json encoding: %w", jErr)
if err := enc.Encode(v); err != nil {
err = fmt.Errorf("could not marshall %T to JSON for logging: %w", v, err)
return nil, err
}
// encoder.Encode appends a newline to the end


@@ -6,6 +6,7 @@ import (
"time"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/containerd/containerd/log"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -29,7 +30,7 @@ type Hook struct {
// An empty string disables formatting.
// When disabled, the fall back will the JSON encoding, if enabled.
//
// Default is [TimeFormat].
// Default is [github.com/containerd/containerd/log.RFC3339NanoFixed].
TimeFormat string
// Duration format converts a [time.Duration] fields to an appropriate encoding.
@@ -48,7 +49,7 @@ var _ logrus.Hook = &Hook{}
func NewHook() *Hook {
return &Hook{
TimeFormat: TimeFormat,
TimeFormat: log.RFC3339NanoFixed,
DurationFormat: DurationFormatString,
AddSpanContext: true,
}


@@ -1,12 +0,0 @@
package log
import (
"github.com/sirupsen/logrus"
)
type NopFormatter struct{}
var _ logrus.Formatter = NopFormatter{}
// Format does nothing and returns a nil slice.
func (NopFormatter) Format(*logrus.Entry) ([]byte, error) { return nil, nil }


@@ -55,7 +55,7 @@ func ScrubProcessParameters(s string) (string, error) {
}
pp.Environment = map[string]string{_scrubbedReplacement: _scrubbedReplacement}
b, err := encode(pp)
b, err := encodeBuffer(bytes.NewBuffer(b[:0]), pp)
if err != nil {
return "", err
}
@@ -89,11 +89,11 @@ func scrubBridgeCreate(m genMap) error {
}
func scrubLinuxHostedSystem(m genMap) error {
if m, ok := index(m, "OciSpecification"); ok { //nolint:govet // shadow
if m, ok := index(m, "OciSpecification"); ok {
if _, ok := m["annotations"]; ok {
m["annotations"] = map[string]string{_scrubbedReplacement: _scrubbedReplacement}
}
if m, ok := index(m, "process"); ok { //nolint:govet // shadow
if m, ok := index(m, "process"); ok {
if _, ok := m["env"]; ok {
m["env"] = []string{_scrubbedReplacement}
return nil
@@ -113,7 +113,7 @@ func scrubExecuteProcess(m genMap) error {
if !isRequestBase(m) {
return ErrUnknownType
}
if m, ok := index(m, "Settings"); ok { //nolint:govet // shadow
if m, ok := index(m, "Settings"); ok {
if ss, ok := m["ProcessParameters"]; ok {
// ProcessParameters is a json encoded struct passed as a regular sting field
s, ok := ss.(string)


@@ -5,7 +5,7 @@ package guestrequest
type RequestType string
type ResourceType string
// RequestType const.
// RequestType const
const (
RequestTypeAdd RequestType = "Add"
RequestTypeRemove RequestType = "Remove"
@@ -54,23 +54,3 @@ var (
"305891a9-b251-5dfe-91a2-c25d9212275b",
}
)
// constants for v2 schema ProcessModifyRequest
// Operation type for [hcsschema.ProcessModifyRequest].
type ProcessModifyOperation string
const (
ModifyProcessConsoleSize ProcessModifyOperation = "ConsoleSize"
CloseProcessHandle ProcessModifyOperation = "CloseHandle"
)
// Standard IO handle(s) to close for [hcsschema.CloseHandle] in [hcsschema.ProcessModifyRequest].
type STDIOHandle string
const (
STDInHandle STDIOHandle = "StdIn"
STDOutHandle STDIOHandle = "StdOut"
STDErrHandle STDIOHandle = "StdErr"
AllHandles STDIOHandle = "All"
)


@@ -276,7 +276,7 @@ func RemoveAllRelative(path string, root *os.File) error {
}
// It is necessary to use os.Open as Readdirnames does not work with
// OpenRelative. This is safe because the above LstatRelative fails
// OpenRelative. This is safe because the above lstatrelative fails
// if the target is outside the root, and we know this is not a
// symlink from the above FILE_ATTRIBUTE_REPARSE_POINT check.
fd, err := os.Open(filepath.Join(root.Name(), path))
@@ -293,12 +293,12 @@ func RemoveAllRelative(path string, root *os.File) error {
for {
names, err1 := fd.Readdirnames(100)
for _, name := range names {
if err2 := RemoveAllRelative(path+string(os.PathSeparator)+name, root); err == nil {
err = err2
err1 := RemoveAllRelative(path+string(os.PathSeparator)+name, root)
if err == nil {
err = err1
}
}
if err1 == io.EOF {
// Readdirnames has no more files to return
break
}
// If Readdirnames returned an error, use it.


@@ -72,8 +72,8 @@ func (r *baseLayerReader) walkUntilCancelled() error {
return err
}
utilityVMAbsPath := filepath.Join(r.root, UtilityVMPath)
utilityVMFilesAbsPath := filepath.Join(r.root, UtilityVMFilesPath)
utilityVMAbsPath := filepath.Join(r.root, utilityVMPath)
utilityVMFilesAbsPath := filepath.Join(r.root, utilityVMFilesPath)
// Ignore a UtilityVM without Files, that's not _really_ a UtiltyVM
if _, err = os.Lstat(utilityVMFilesAbsPath); err != nil {


@@ -5,6 +5,7 @@ import (
"fmt"
"os"
"path/filepath"
"syscall"
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/Microsoft/hcsshim/internal/longpath"
@@ -36,7 +37,7 @@ func ensureHive(path string, root *os.File) (err error) {
return fmt.Errorf("getting path: %w", err)
}
var key winapi.ORHKey
var key syscall.Handle
err = winapi.ORCreateHive(&key)
if err != nil {
return fmt.Errorf("creating hive: %w", err)
@@ -71,7 +72,7 @@ func ensureBaseLayer(root *os.File) (hasUtilityVM bool, err error) {
}
}
stat, err := safefile.LstatRelative(UtilityVMFilesPath, root)
stat, err := safefile.LstatRelative(utilityVMFilesPath, root)
if os.IsNotExist(err) {
return false, nil
@@ -82,7 +83,7 @@ func ensureBaseLayer(root *os.File) (hasUtilityVM bool, err error) {
}
if !stat.Mode().IsDir() {
fullPath := filepath.Join(root.Name(), UtilityVMFilesPath)
fullPath := filepath.Join(root.Name(), utilityVMFilesPath)
return false, errors.Errorf("%s has unexpected file mode %s", fullPath, stat.Mode().String())
}
@@ -91,7 +92,7 @@ func ensureBaseLayer(root *os.File) (hasUtilityVM bool, err error) {
// Just check that this exists as a regular file. If it exists but is not a valid registry hive,
// ProcessUtilityVMImage will complain:
// "The registry could not read in, or write out, or flush, one of the files that contain the system's image of the registry."
bcdPath := filepath.Join(UtilityVMFilesPath, bcdRelativePath)
bcdPath := filepath.Join(utilityVMFilesPath, bcdRelativePath)
stat, err = safefile.LstatRelative(bcdPath, root)
if err != nil {
@@ -121,12 +122,12 @@ func convertToBaseLayer(ctx context.Context, root *os.File) error {
return nil
}
err = safefile.EnsureNotReparsePointRelative(UtilityVMPath, root)
err = safefile.EnsureNotReparsePointRelative(utilityVMPath, root)
if err != nil {
return err
}
utilityVMPath := filepath.Join(root.Name(), UtilityVMPath)
utilityVMPath := filepath.Join(root.Name(), utilityVMPath)
return ProcessUtilityVMImage(ctx, utilityVMPath)
}


@@ -7,10 +7,6 @@ package wclayer
import (
"context"
"fmt"
"os"
"path/filepath"
"strconv"
"syscall"
"github.com/Microsoft/go-winio/pkg/guid"
@@ -105,23 +101,3 @@ func layerPathsToDescriptors(ctx context.Context, parentLayerPaths []string) ([]
return layers, nil
}
// GetLayerUvmBuild looks for a file named `uvmbuildversion` at `layerPath\uvmbuildversion` and returns the
// build number of the UVM from that file.
func GetLayerUvmBuild(layerPath string) (uint16, error) {
data, err := os.ReadFile(filepath.Join(layerPath, UvmBuildFileName))
if err != nil {
return 0, err
}
ver, err := strconv.ParseUint(string(data), 10, 16)
if err != nil {
return 0, err
}
return uint16(ver), nil
}
// WriteLayerUvmBuildFile writes a file at path `layerPath\uvmbuildversion` that contains the given `build`
// version for future reference.
func WriteLayerUvmBuildFile(layerPath string, build uint16) error {
return os.WriteFile(filepath.Join(layerPath, UvmBuildFileName), []byte(fmt.Sprintf("%d", build)), 0777)
}


@@ -29,19 +29,10 @@ var mutatedUtilityVMFiles = map[string]bool{
}
const (
filesPath = `Files`
HivesPath = `Hives`
UtilityVMPath = `UtilityVM`
UtilityVMFilesPath = `UtilityVM\Files`
RegFilesPath = `Files\Windows\System32\config`
BcdFilePath = `UtilityVM\Files\EFI\Microsoft\Boot\BCD`
BootMgrFilePath = `UtilityVM\Files\EFI\Microsoft\Boot\bootmgfw.efi`
ContainerBaseVhd = `blank-base.vhdx`
ContainerScratchVhd = `blank.vhdx`
UtilityVMBaseVhd = `SystemTemplateBase.vhdx`
UtilityVMScratchVhd = `SystemTemplate.vhdx`
LayoutFileName = `layout`
UvmBuildFileName = `uvmbuildversion`
filesPath = `Files`
hivesPath = `Hives`
utilityVMPath = `UtilityVM`
utilityVMFilesPath = `UtilityVM\Files`
)
func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os.File, err error) {
@@ -252,11 +243,11 @@ func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.Fil
if !hasPathPrefix(path, filesPath) {
size = fe.fi.Size()
r.backupReader = winio.NewBackupFileReader(f, false)
if path == HivesPath || path == filesPath {
if path == hivesPath || path == filesPath {
// The Hives directory has a non-deterministic file time because of the
// nature of the import process. Use the times from System_Delta.
var g *os.File
g, err = os.Open(filepath.Join(r.root, HivesPath, `System_Delta`))
g, err = os.Open(filepath.Join(r.root, hivesPath, `System_Delta`))
if err != nil {
return
}
@@ -418,7 +409,7 @@ func (w *legacyLayerWriter) CloseRoots() {
func (w *legacyLayerWriter) initUtilityVM() error {
if !w.HasUtilityVM {
err := safefile.MkdirRelative(UtilityVMPath, w.destRoot)
err := safefile.MkdirRelative(utilityVMPath, w.destRoot)
if err != nil {
return err
}
@@ -426,7 +417,7 @@ func (w *legacyLayerWriter) initUtilityVM() error {
// clone the utility VM from the parent layer into this layer. Use hard
// links to avoid unnecessary copying, since most of the files are
// immutable.
err = cloneTree(w.parentRoots[0], w.destRoot, UtilityVMFilesPath, mutatedUtilityVMFiles)
err = cloneTree(w.parentRoots[0], w.destRoot, utilityVMFilesPath, mutatedUtilityVMFiles)
if err != nil {
return fmt.Errorf("cloning the parent utility VM image failed: %s", err)
}
@@ -601,7 +592,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
return err
}
if name == UtilityVMPath {
if name == utilityVMPath {
return w.initUtilityVM()
}
@@ -610,11 +601,11 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
}
name = filepath.Clean(name)
if hasPathPrefix(name, UtilityVMPath) {
if hasPathPrefix(name, utilityVMPath) {
if !w.HasUtilityVM {
return errors.New("missing UtilityVM directory")
}
if !hasPathPrefix(name, UtilityVMFilesPath) && name != UtilityVMFilesPath {
if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath {
return errors.New("invalid UtilityVM layer")
}
createDisposition := uint32(winapi.FILE_OPEN)
@@ -708,7 +699,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
return err
}
if hasPathPrefix(name, HivesPath) {
if hasPathPrefix(name, hivesPath) {
w.backupWriter = winio.NewBackupFileWriter(f, false)
w.bufWriter.Reset(w.backupWriter)
} else {
@@ -740,14 +731,14 @@ func (w *legacyLayerWriter) AddLink(name string, target string) error {
// Look for cross-layer hard link targets in the parent layers, since
// nothing is in the destination path yet.
roots = w.parentRoots
} else if hasPathPrefix(target, UtilityVMFilesPath) {
} else if hasPathPrefix(target, utilityVMFilesPath) {
// Since the utility VM is fully cloned into the destination path
// already, look for cross-layer hard link targets directly in the
// destination path.
roots = []*os.File{w.destRoot}
}
if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, UtilityVMFilesPath)) {
if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, utilityVMFilesPath)) {
return errors.New("invalid hard link in layer")
}
@@ -786,7 +777,7 @@ func (w *legacyLayerWriter) Remove(name string) error {
name = filepath.Clean(name)
if hasPathPrefix(name, filesPath) {
w.Tombstones = append(w.Tombstones, name)
} else if hasPathPrefix(name, UtilityVMFilesPath) {
} else if hasPathPrefix(name, utilityVMFilesPath) {
err := w.initUtilityVM()
if err != nil {
return err


@@ -1,45 +0,0 @@
package winapi
import (
"unsafe"
"github.com/Microsoft/go-winio/pkg/guid"
"golang.org/x/sys/windows"
)
type g = guid.GUID
type FsHandle uintptr
type StreamHandle uintptr
type CimFsFileMetadata struct {
Attributes uint32
FileSize int64
CreationTime windows.Filetime
LastWriteTime windows.Filetime
ChangeTime windows.Filetime
LastAccessTime windows.Filetime
SecurityDescriptorBuffer unsafe.Pointer
SecurityDescriptorSize uint32
ReparseDataBuffer unsafe.Pointer
ReparseDataSize uint32
ExtendedAttributes unsafe.Pointer
EACount uint32
}
//sys CimMountImage(imagePath string, fsName string, flags uint32, volumeID *g) (hr error) = cimfs.CimMountImage?
//sys CimDismountImage(volumeID *g) (hr error) = cimfs.CimDismountImage?
//sys CimCreateImage(imagePath string, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) = cimfs.CimCreateImage?
//sys CimCloseImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCloseImage?
//sys CimCommitImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCommitImage?
//sys CimCreateFile(cimFSHandle FsHandle, path string, file *CimFsFileMetadata, cimStreamHandle *StreamHandle) (hr error) = cimfs.CimCreateFile?
//sys CimCloseStream(cimStreamHandle StreamHandle) (hr error) = cimfs.CimCloseStream?
//sys CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uint32) (hr error) = cimfs.CimWriteStream?
//sys CimDeletePath(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimDeletePath?
//sys CimCreateHardLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) = cimfs.CimCreateHardLink?
//sys CimCreateAlternateStream(cimFSHandle FsHandle, path string, size uint64, cimStreamHandle *StreamHandle) (hr error) = cimfs.CimCreateAlternateStream?


@@ -1,37 +0,0 @@
package winapi
// Offline registry management API
type ORHKey uintptr
type RegType uint32
const (
// Registry value types: https://docs.microsoft.com/en-us/windows/win32/sysinfo/registry-value-types
REG_TYPE_NONE RegType = 0
REG_TYPE_SZ RegType = 1
REG_TYPE_EXPAND_SZ RegType = 2
REG_TYPE_BINARY RegType = 3
REG_TYPE_DWORD RegType = 4
REG_TYPE_DWORD_LITTLE_ENDIAN RegType = 4
REG_TYPE_DWORD_BIG_ENDIAN RegType = 5
REG_TYPE_LINK RegType = 6
REG_TYPE_MULTI_SZ RegType = 7
REG_TYPE_RESOURCE_LIST RegType = 8
REG_TYPE_FULL_RESOURCE_DESCRIPTOR RegType = 9
REG_TYPE_RESOURCE_REQUIREMENTS_LIST RegType = 10
REG_TYPE_QWORD RegType = 11
REG_TYPE_QWORD_LITTLE_ENDIAN RegType = 11
)
//sys ORCreateHive(key *ORHKey) (win32err error) = offreg.ORCreateHive
//sys ORMergeHives(hiveHandles []ORHKey, result *ORHKey) (win32err error) = offreg.ORMergeHives
//sys OROpenHive(hivePath string, result *ORHKey) (win32err error) = offreg.OROpenHive
//sys ORCloseHive(handle ORHKey) (win32err error) = offreg.ORCloseHive
//sys ORSaveHive(handle ORHKey, hivePath string, osMajorVersion uint32, osMinorVersion uint32) (win32err error) = offreg.ORSaveHive
//sys OROpenKey(handle ORHKey, subKey string, result *ORHKey) (win32err error) = offreg.OROpenKey
//sys ORCloseKey(handle ORHKey) (win32err error) = offreg.ORCloseKey
//sys ORCreateKey(handle ORHKey, subKey string, class uintptr, options uint32, securityDescriptor uintptr, result *ORHKey, disposition *uint32) (win32err error) = offreg.ORCreateKey
//sys ORDeleteKey(handle ORHKey, subKey string) (win32err error) = offreg.ORDeleteKey
//sys ORGetValue(handle ORHKey, subKey string, value string, valueType *uint32, data *byte, dataLen *uint32) (win32err error) = offreg.ORGetValue
//sys ORSetValue(handle ORHKey, valueName string, valueType uint32, data *byte, dataLen uint32) (win32err error) = offreg.ORSetValue
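The offline-registry declarations above map onto a small create/modify/save workflow. A minimal, hypothetical sketch of how a caller inside this package could assemble a layer hive with these wrappers (the key path, value name, and data are invented for illustration; this is not code from the repository):

	func writeExampleHive(hivePath string) (err error) {
		var hive ORHKey
		if err = ORCreateHive(&hive); err != nil {
			return err
		}
		defer ORCloseHive(hive)

		var key ORHKey
		var disposition uint32
		if err = ORCreateKey(hive, `ControlSet001\Services\Example`, 0, 0, 0, &key, &disposition); err != nil {
			return err
		}
		defer ORCloseKey(key)

		start := []byte{2, 0, 0, 0} // REG_DWORD value 2 ("automatic start"), little-endian
		if err = ORSetValue(key, "Start", uint32(REG_TYPE_DWORD), &start[0], uint32(len(start))); err != nil {
			return err
		}

		// Persist the hive to disk, tagging it for a 10.0 guest OS.
		return ORSaveHive(hive, hivePath, 10, 0)
	}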

View File

@@ -0,0 +1,5 @@
package winapi
//sys ORCreateHive(key *syscall.Handle) (regerrno error) = offreg.ORCreateHive
//sys ORSaveHive(key syscall.Handle, file string, OsMajorVersion uint32, OsMinorVersion uint32) (regerrno error) = offreg.ORSaveHive
//sys ORCloseHive(key syscall.Handle) (regerrno error) = offreg.ORCloseHive

View File

@@ -80,9 +80,3 @@ func ConvertStringSetToSlice(buf []byte) ([]string, error) {
}
return nil, errors.New("string set malformed: missing null terminator at end of buffer")
}
// ParseUtf16LE parses a UTF-16LE byte array into a string (without passing
// through a uint16 or rune array).
func ParseUtf16LE(b []byte) string {
return windows.UTF16PtrToString((*uint16)(unsafe.Pointer(&b[0])))
}

View File

@@ -43,7 +43,6 @@ var (
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
modbindfltapi = windows.NewLazySystemDLL("bindfltapi.dll")
modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll")
modcimfs = windows.NewLazySystemDLL("cimfs.dll")
modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll")
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modnetapi32 = windows.NewLazySystemDLL("netapi32.dll")
@@ -56,17 +55,6 @@ var (
procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW")
procCimCloseImage = modcimfs.NewProc("CimCloseImage")
procCimCloseStream = modcimfs.NewProc("CimCloseStream")
procCimCommitImage = modcimfs.NewProc("CimCommitImage")
procCimCreateAlternateStream = modcimfs.NewProc("CimCreateAlternateStream")
procCimCreateFile = modcimfs.NewProc("CimCreateFile")
procCimCreateHardLink = modcimfs.NewProc("CimCreateHardLink")
procCimCreateImage = modcimfs.NewProc("CimCreateImage")
procCimDeletePath = modcimfs.NewProc("CimDeletePath")
procCimDismountImage = modcimfs.NewProc("CimDismountImage")
procCimMountImage = modcimfs.NewProc("CimMountImage")
procCimWriteStream = modcimfs.NewProc("CimWriteStream")
procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId")
procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole")
procCopyFileW = modkernel32.NewProc("CopyFileW")
@@ -96,16 +84,8 @@ var (
procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile")
procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError")
procORCloseHive = modoffreg.NewProc("ORCloseHive")
procORCloseKey = modoffreg.NewProc("ORCloseKey")
procORCreateHive = modoffreg.NewProc("ORCreateHive")
procORCreateKey = modoffreg.NewProc("ORCreateKey")
procORDeleteKey = modoffreg.NewProc("ORDeleteKey")
procORGetValue = modoffreg.NewProc("ORGetValue")
procORMergeHives = modoffreg.NewProc("ORMergeHives")
procOROpenHive = modoffreg.NewProc("OROpenHive")
procOROpenKey = modoffreg.NewProc("OROpenKey")
procORSaveHive = modoffreg.NewProc("ORSaveHive")
procORSetValue = modoffreg.NewProc("ORSetValue")
)
func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) {
@@ -184,235 +164,6 @@ func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr
return
}
func CimCloseImage(cimFSHandle FsHandle) (hr error) {
hr = procCimCloseImage.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall(procCimCloseImage.Addr(), 1, uintptr(cimFSHandle), 0, 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimCloseStream(cimStreamHandle StreamHandle) (hr error) {
hr = procCimCloseStream.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall(procCimCloseStream.Addr(), 1, uintptr(cimStreamHandle), 0, 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimCommitImage(cimFSHandle FsHandle) (hr error) {
hr = procCimCommitImage.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall(procCimCommitImage.Addr(), 1, uintptr(cimFSHandle), 0, 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimCreateAlternateStream(cimFSHandle FsHandle, path string, size uint64, cimStreamHandle *StreamHandle) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(path)
if hr != nil {
return
}
return _CimCreateAlternateStream(cimFSHandle, _p0, size, cimStreamHandle)
}
func _CimCreateAlternateStream(cimFSHandle FsHandle, path *uint16, size uint64, cimStreamHandle *StreamHandle) (hr error) {
hr = procCimCreateAlternateStream.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procCimCreateAlternateStream.Addr(), 4, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(size), uintptr(unsafe.Pointer(cimStreamHandle)), 0, 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimCreateFile(cimFSHandle FsHandle, path string, file *CimFsFileMetadata, cimStreamHandle *StreamHandle) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(path)
if hr != nil {
return
}
return _CimCreateFile(cimFSHandle, _p0, file, cimStreamHandle)
}
func _CimCreateFile(cimFSHandle FsHandle, path *uint16, file *CimFsFileMetadata, cimStreamHandle *StreamHandle) (hr error) {
hr = procCimCreateFile.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procCimCreateFile.Addr(), 4, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(cimStreamHandle)), 0, 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimCreateHardLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(newPath)
if hr != nil {
return
}
var _p1 *uint16
_p1, hr = syscall.UTF16PtrFromString(oldPath)
if hr != nil {
return
}
return _CimCreateHardLink(cimFSHandle, _p0, _p1)
}
func _CimCreateHardLink(cimFSHandle FsHandle, newPath *uint16, oldPath *uint16) (hr error) {
hr = procCimCreateHardLink.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall(procCimCreateHardLink.Addr(), 3, uintptr(cimFSHandle), uintptr(unsafe.Pointer(newPath)), uintptr(unsafe.Pointer(oldPath)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimCreateImage(imagePath string, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(imagePath)
if hr != nil {
return
}
return _CimCreateImage(_p0, oldFSName, newFSName, cimFSHandle)
}
func _CimCreateImage(imagePath *uint16, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) {
hr = procCimCreateImage.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procCimCreateImage.Addr(), 4, uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(oldFSName)), uintptr(unsafe.Pointer(newFSName)), uintptr(unsafe.Pointer(cimFSHandle)), 0, 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimDeletePath(cimFSHandle FsHandle, path string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(path)
if hr != nil {
return
}
return _CimDeletePath(cimFSHandle, _p0)
}
func _CimDeletePath(cimFSHandle FsHandle, path *uint16) (hr error) {
hr = procCimDeletePath.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall(procCimDeletePath.Addr(), 2, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimDismountImage(volumeID *g) (hr error) {
hr = procCimDismountImage.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall(procCimDismountImage.Addr(), 1, uintptr(unsafe.Pointer(volumeID)), 0, 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimMountImage(imagePath string, fsName string, flags uint32, volumeID *g) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(imagePath)
if hr != nil {
return
}
var _p1 *uint16
_p1, hr = syscall.UTF16PtrFromString(fsName)
if hr != nil {
return
}
return _CimMountImage(_p0, _p1, flags, volumeID)
}
func _CimMountImage(imagePath *uint16, fsName *uint16, flags uint32, volumeID *g) (hr error) {
hr = procCimMountImage.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procCimMountImage.Addr(), 4, uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(fsName)), uintptr(flags), uintptr(unsafe.Pointer(volumeID)), 0, 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uint32) (hr error) {
hr = procCimWriteStream.Find()
if hr != nil {
return
}
r0, _, _ := syscall.Syscall(procCimWriteStream.Addr(), 3, uintptr(cimStreamHandle), uintptr(buffer), uintptr(bufferSize))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) {
r0, _, _ := syscall.Syscall(procSetJobCompartmentId.Addr(), 2, uintptr(handle), uintptr(compartmentId), 0)
if r0 != 0 {
@@ -630,162 +381,35 @@ func RtlNtStatusToDosError(status uint32) (winerr error) {
return
}
func ORCloseHive(handle ORHKey) (win32err error) {
r0, _, _ := syscall.Syscall(procORCloseHive.Addr(), 1, uintptr(handle), 0, 0)
func ORCloseHive(key syscall.Handle) (regerrno error) {
r0, _, _ := syscall.Syscall(procORCloseHive.Addr(), 1, uintptr(key), 0, 0)
if r0 != 0 {
win32err = syscall.Errno(r0)
regerrno = syscall.Errno(r0)
}
return
}
func ORCloseKey(handle ORHKey) (win32err error) {
r0, _, _ := syscall.Syscall(procORCloseKey.Addr(), 1, uintptr(handle), 0, 0)
if r0 != 0 {
win32err = syscall.Errno(r0)
}
return
}
func ORCreateHive(key *ORHKey) (win32err error) {
func ORCreateHive(key *syscall.Handle) (regerrno error) {
r0, _, _ := syscall.Syscall(procORCreateHive.Addr(), 1, uintptr(unsafe.Pointer(key)), 0, 0)
if r0 != 0 {
win32err = syscall.Errno(r0)
regerrno = syscall.Errno(r0)
}
return
}
func ORCreateKey(handle ORHKey, subKey string, class uintptr, options uint32, securityDescriptor uintptr, result *ORHKey, disposition *uint32) (win32err error) {
func ORSaveHive(key syscall.Handle, file string, OsMajorVersion uint32, OsMinorVersion uint32) (regerrno error) {
var _p0 *uint16
_p0, win32err = syscall.UTF16PtrFromString(subKey)
if win32err != nil {
_p0, regerrno = syscall.UTF16PtrFromString(file)
if regerrno != nil {
return
}
return _ORCreateKey(handle, _p0, class, options, securityDescriptor, result, disposition)
return _ORSaveHive(key, _p0, OsMajorVersion, OsMinorVersion)
}
func _ORCreateKey(handle ORHKey, subKey *uint16, class uintptr, options uint32, securityDescriptor uintptr, result *ORHKey, disposition *uint32) (win32err error) {
r0, _, _ := syscall.Syscall9(procORCreateKey.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(class), uintptr(options), uintptr(securityDescriptor), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)), 0, 0)
func _ORSaveHive(key syscall.Handle, file *uint16, OsMajorVersion uint32, OsMinorVersion uint32) (regerrno error) {
r0, _, _ := syscall.Syscall6(procORSaveHive.Addr(), 4, uintptr(key), uintptr(unsafe.Pointer(file)), uintptr(OsMajorVersion), uintptr(OsMinorVersion), 0, 0)
if r0 != 0 {
win32err = syscall.Errno(r0)
}
return
}
func ORDeleteKey(handle ORHKey, subKey string) (win32err error) {
var _p0 *uint16
_p0, win32err = syscall.UTF16PtrFromString(subKey)
if win32err != nil {
return
}
return _ORDeleteKey(handle, _p0)
}
func _ORDeleteKey(handle ORHKey, subKey *uint16) (win32err error) {
r0, _, _ := syscall.Syscall(procORDeleteKey.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(subKey)), 0)
if r0 != 0 {
win32err = syscall.Errno(r0)
}
return
}
func ORGetValue(handle ORHKey, subKey string, value string, valueType *uint32, data *byte, dataLen *uint32) (win32err error) {
var _p0 *uint16
_p0, win32err = syscall.UTF16PtrFromString(subKey)
if win32err != nil {
return
}
var _p1 *uint16
_p1, win32err = syscall.UTF16PtrFromString(value)
if win32err != nil {
return
}
return _ORGetValue(handle, _p0, _p1, valueType, data, dataLen)
}
func _ORGetValue(handle ORHKey, subKey *uint16, value *uint16, valueType *uint32, data *byte, dataLen *uint32) (win32err error) {
r0, _, _ := syscall.Syscall6(procORGetValue.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(value)), uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(data)), uintptr(unsafe.Pointer(dataLen)))
if r0 != 0 {
win32err = syscall.Errno(r0)
}
return
}
func ORMergeHives(hiveHandles []ORHKey, result *ORHKey) (win32err error) {
var _p0 *ORHKey
if len(hiveHandles) > 0 {
_p0 = &hiveHandles[0]
}
r0, _, _ := syscall.Syscall(procORMergeHives.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(hiveHandles)), uintptr(unsafe.Pointer(result)))
if r0 != 0 {
win32err = syscall.Errno(r0)
}
return
}
func OROpenHive(hivePath string, result *ORHKey) (win32err error) {
var _p0 *uint16
_p0, win32err = syscall.UTF16PtrFromString(hivePath)
if win32err != nil {
return
}
return _OROpenHive(_p0, result)
}
func _OROpenHive(hivePath *uint16, result *ORHKey) (win32err error) {
r0, _, _ := syscall.Syscall(procOROpenHive.Addr(), 2, uintptr(unsafe.Pointer(hivePath)), uintptr(unsafe.Pointer(result)), 0)
if r0 != 0 {
win32err = syscall.Errno(r0)
}
return
}
func OROpenKey(handle ORHKey, subKey string, result *ORHKey) (win32err error) {
var _p0 *uint16
_p0, win32err = syscall.UTF16PtrFromString(subKey)
if win32err != nil {
return
}
return _OROpenKey(handle, _p0, result)
}
func _OROpenKey(handle ORHKey, subKey *uint16, result *ORHKey) (win32err error) {
r0, _, _ := syscall.Syscall(procOROpenKey.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(result)))
if r0 != 0 {
win32err = syscall.Errno(r0)
}
return
}
func ORSaveHive(handle ORHKey, hivePath string, osMajorVersion uint32, osMinorVersion uint32) (win32err error) {
var _p0 *uint16
_p0, win32err = syscall.UTF16PtrFromString(hivePath)
if win32err != nil {
return
}
return _ORSaveHive(handle, _p0, osMajorVersion, osMinorVersion)
}
func _ORSaveHive(handle ORHKey, hivePath *uint16, osMajorVersion uint32, osMinorVersion uint32) (win32err error) {
r0, _, _ := syscall.Syscall6(procORSaveHive.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(hivePath)), uintptr(osMajorVersion), uintptr(osMinorVersion), 0, 0)
if r0 != 0 {
win32err = syscall.Errno(r0)
}
return
}
func ORSetValue(handle ORHKey, valueName string, valueType uint32, data *byte, dataLen uint32) (win32err error) {
var _p0 *uint16
_p0, win32err = syscall.UTF16PtrFromString(valueName)
if win32err != nil {
return
}
return _ORSetValue(handle, _p0, valueType, data, dataLen)
}
func _ORSetValue(handle ORHKey, valueName *uint16, valueType uint32, data *byte, dataLen uint32) (win32err error) {
r0, _, _ := syscall.Syscall6(procORSetValue.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(valueName)), uintptr(valueType), uintptr(unsafe.Pointer(data)), uintptr(dataLen), 0)
if r0 != 0 {
win32err = syscall.Errno(r0)
regerrno = syscall.Errno(r0)
}
return
}

View File

@@ -32,7 +32,6 @@ func CreateScratchLayer(info DriverInfo, layerId, parentId string, parentLayerPa
func DeactivateLayer(info DriverInfo, id string) error {
return wclayer.DeactivateLayer(context.Background(), layerPath(&info, id))
}
func DestroyLayer(info DriverInfo, id string) error {
return wclayer.DestroyLayer(context.Background(), layerPath(&info, id))
}

View File

@@ -5,7 +5,6 @@ import (
"sync"
"golang.org/x/sys/windows"
"golang.org/x/sys/windows/registry"
)
// OSVersion is a wrapper for Windows version information
@@ -26,15 +25,16 @@ var (
// The calling application must be manifested to get the correct version information.
func Get() OSVersion {
once.Do(func() {
v := *windows.RtlGetVersion()
var err error
osv = OSVersion{}
osv.MajorVersion = uint8(v.MajorVersion)
osv.MinorVersion = uint8(v.MinorVersion)
osv.Build = uint16(v.BuildNumber)
// Fill version value so that existing clients don't break
osv.Version = v.BuildNumber << 16
osv.Version = osv.Version | (uint32(v.MinorVersion) << 8)
osv.Version = osv.Version | v.MajorVersion
osv.Version, err = windows.GetVersion()
if err != nil {
// GetVersion never fails.
panic(err)
}
osv.MajorVersion = uint8(osv.Version & 0xFF)
osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
osv.Build = uint16(osv.Version >> 16)
})
return osv
}
@@ -57,18 +57,3 @@ func (osv OSVersion) String() string {
func (osv OSVersion) ToString() string {
return osv.String()
}
// Running `cmd /c ver` shows something like "10.0.20348.1000". The last component ("1000") is the revision
// number
func BuildRevision() (uint32, error) {
k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
if err != nil {
return 0, fmt.Errorf("open `CurrentVersion` registry key: %w", err)
}
defer k.Close()
s, _, err := k.GetIntegerValue("UBR")
if err != nil {
return 0, fmt.Errorf("read `UBR` from registry: %w", err)
}
return uint32(s), nil
}

View File

@@ -1,35 +0,0 @@
package osversion
// List of stable ABI compliant ltsc releases
// Note: List must be sorted in ascending order
var compatLTSCReleases = []uint16{
V21H2Server,
}
// CheckHostAndContainerCompat checks if given host and container
// OS versions are compatible.
// It includes support for stable ABI compliant versions as well.
// Every release after WS 2022 will support the previous ltsc
// container image. Stable ABI is in preview mode for windows 11 client.
// Refer: https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility?tabs=windows-server-2022%2Cwindows-10#windows-server-host-os-compatibility
func CheckHostAndContainerCompat(host, ctr OSVersion) bool {
// check major minor versions of host and guest
if host.MajorVersion != ctr.MajorVersion ||
host.MinorVersion != ctr.MinorVersion {
return false
}
// If host is < WS 2022, exact version match is required
if host.Build < V21H2Server {
return host.Build == ctr.Build
}
var supportedLtscRelease uint16
for i := len(compatLTSCReleases) - 1; i >= 0; i-- {
if host.Build >= compatLTSCReleases[i] {
supportedLtscRelease = compatLTSCReleases[i]
break
}
}
return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build
}
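Worked through with concrete build numbers, the rule above means: a pre-2022 host only accepts a container image with exactly its own build, while a host at or above V21H2Server (the Windows Server 2022 LTSC build, 20348, matching the "10.0.20348" example elsewhere in this diff) accepts any container from that LTSC floor up to its own build. A hedged usage sketch, assuming a Windows build of a newer hcsshim that still ships this function and using illustrative build numbers:

	package main

	import (
		"fmt"

		"github.com/Microsoft/hcsshim/osversion"
	)

	func main() {
		host2019 := osversion.OSVersion{MajorVersion: 10, MinorVersion: 0, Build: 17763}
		host2022 := osversion.OSVersion{MajorVersion: 10, MinorVersion: 0, Build: 20348}
		ctr2019 := osversion.OSVersion{MajorVersion: 10, MinorVersion: 0, Build: 17763}
		ctr2022 := osversion.OSVersion{MajorVersion: 10, MinorVersion: 0, Build: 20348}

		fmt.Println(osversion.CheckHostAndContainerCompat(host2019, ctr2019)) // true: exact match required pre-2022
		fmt.Println(osversion.CheckHostAndContainerCompat(host2019, ctr2022)) // false: pre-2022 host, different build
		fmt.Println(osversion.CheckHostAndContainerCompat(host2022, ctr2019)) // false: below the supported LTSC floor
		fmt.Println(osversion.CheckHostAndContainerCompat(host2022, ctr2022)) // true: stable-ABI LTSC match
	}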

vendor/github.com/Microsoft/hcsshim/tools.go (5 lines) generated vendored Normal file

View File

@@ -0,0 +1,5 @@
//go:build tools
package hcsshim
import _ "github.com/Microsoft/go-winio/tools/mkwinsyscall"

View File

@@ -14,4 +14,4 @@
limitations under the License.
*/
package stats
package v1

File diff suppressed because it is too large.

View File

@@ -1,6 +1,7 @@
file {
name: "github.com/containerd/cgroups/cgroup1/stats/metrics.proto"
name: "github.com/containerd/cgroups/stats/v1/metrics.proto"
package: "io.containerd.cgroups.v1"
dependency: "gogoproto/gogo.proto"
message_type {
name: "Metrics"
field {
@@ -25,6 +26,9 @@ file {
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.CPUStat"
options {
65004: "CPU"
}
json_name: "cpu"
}
field {
@@ -171,6 +175,9 @@ file {
number: 4
label: LABEL_REPEATED
type: TYPE_UINT64
options {
65004: "PerCPU"
}
json_name: "perCpu"
}
}
@@ -212,6 +219,9 @@ file {
number: 2
label: LABEL_OPTIONAL
type: TYPE_UINT64
options {
65004: "RSS"
}
json_name: "rss"
}
field {
@@ -219,6 +229,9 @@ file {
number: 3
label: LABEL_OPTIONAL
type: TYPE_UINT64
options {
65004: "RSSHuge"
}
json_name: "rssHuge"
}
field {
@@ -331,6 +344,9 @@ file {
number: 19
label: LABEL_OPTIONAL
type: TYPE_UINT64
options {
65004: "TotalRSS"
}
json_name: "totalRss"
}
field {
@@ -338,6 +354,9 @@ file {
number: 20
label: LABEL_OPTIONAL
type: TYPE_UINT64
options {
65004: "TotalRSSHuge"
}
json_name: "totalRssHuge"
}
field {
@@ -454,6 +473,9 @@ file {
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".io.containerd.cgroups.v1.MemoryEntry"
options {
65004: "KernelTCP"
}
json_name: "kernelTcp"
}
}
@@ -764,8 +786,5 @@ file {
json_name: "nrIoWait"
}
}
options {
go_package: "github.com/containerd/cgroups/cgroup1/stats"
}
syntax: "proto3"
}

View File

@@ -2,12 +2,12 @@ syntax = "proto3";
package io.containerd.cgroups.v1;
option go_package = "github.com/containerd/cgroups/cgroup1/stats";
import "gogoproto/gogo.proto";
message Metrics {
repeated HugetlbStat hugetlb = 1;
PidsStat pids = 2;
CPUStat cpu = 3;
CPUStat cpu = 3 [(gogoproto.customname) = "CPU"];
MemoryStat memory = 4;
BlkIOStat blkio = 5;
RdmaStat rdma = 6;
@@ -38,7 +38,7 @@ message CPUUsage {
uint64 total = 1;
uint64 kernel = 2;
uint64 user = 3;
repeated uint64 per_cpu = 4;
repeated uint64 per_cpu = 4 [(gogoproto.customname) = "PerCPU"];
}
@@ -50,8 +50,8 @@ message Throttle {
message MemoryStat {
uint64 cache = 1;
uint64 rss = 2;
uint64 rss_huge = 3;
uint64 rss = 2 [(gogoproto.customname) = "RSS"];
uint64 rss_huge = 3 [(gogoproto.customname) = "RSSHuge"];
uint64 mapped_file = 4;
uint64 dirty = 5;
uint64 writeback = 6;
@@ -67,8 +67,8 @@ message MemoryStat {
uint64 hierarchical_memory_limit = 16;
uint64 hierarchical_swap_limit = 17;
uint64 total_cache = 18;
uint64 total_rss = 19;
uint64 total_rss_huge = 20;
uint64 total_rss = 19 [(gogoproto.customname) = "TotalRSS"];
uint64 total_rss_huge = 20 [(gogoproto.customname) = "TotalRSSHuge"];
uint64 total_mapped_file = 21;
uint64 total_dirty = 22;
uint64 total_writeback = 23;
@@ -84,7 +84,7 @@ message MemoryStat {
MemoryEntry usage = 33;
MemoryEntry swap = 34;
MemoryEntry kernel = 35;
MemoryEntry kernel_tcp = 36;
MemoryEntry kernel_tcp = 36 [(gogoproto.customname) = "KernelTCP"];
}
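The [(gogoproto.customname) = "..."] options in this proto steer gogo/protobuf's Go code generation: without them the generator derives field names mechanically (Rss, RssHuge, PerCpu, TotalRss, TotalRssHuge, KernelTcp), and with them the generated structs keep the Go-style initialisms. The bare "65004" options in the metrics.pb.txt descriptor above are the same customname extension in raw-descriptor form. An abridged sketch of the resulting MemoryStat shape (struct tags and most fields omitted; not the full generated file):

	package stats

	// Approximate shape of the struct gogo/protobuf generates for MemoryStat
	// once the customname options are present.
	type MemoryStat struct {
		Cache   uint64 // field 1
		RSS     uint64 // field 2: would be generated as Rss without customname
		RSSHuge uint64 // field 3: would be generated as RssHuge without customname
		// ... remaining fields elided ...
	}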

File diff suppressed because it is too large.

vendor/github.com/containerd/containerd/log/context.go (72 lines) generated vendored Normal file
View File

@@ -0,0 +1,72 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package log
import (
"context"
"github.com/sirupsen/logrus"
)
var (
// G is an alias for GetLogger.
//
// We may want to define this locally to a package to get package tagged log
// messages.
G = GetLogger
// L is an alias for the standard logger.
L = logrus.NewEntry(logrus.StandardLogger())
)
type (
loggerKey struct{}
// Fields type to pass to `WithFields`, alias from `logrus`.
Fields = logrus.Fields
)
const (
// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
// ensure the formatted time is always the same number of characters.
RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
// TextFormat represents the text logging format
TextFormat = "text"
// JSONFormat represents the JSON logging format
JSONFormat = "json"
)
// WithLogger returns a new context with the provided logger. Use in
// combination with logger.WithField(s) for great effect.
func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
e := logger.WithContext(ctx)
return context.WithValue(ctx, loggerKey{}, e)
}
// GetLogger retrieves the current logger from the context. If no logger is
// available, the default logger is returned.
func GetLogger(ctx context.Context) *logrus.Entry {
logger := ctx.Value(loggerKey{})
if logger == nil {
return L.WithContext(ctx)
}
return logger.(*logrus.Entry)
}
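The package above is the standard containerd pattern for carrying a request-scoped logger in a context.Context: WithLogger stores a *logrus.Entry under a private key, and G/GetLogger pulls it back out, falling back to the package-level entry L. A minimal usage sketch, assuming github.com/containerd/containerd/log is on the module path (field values are illustrative):

	package main

	import (
		"context"

		"github.com/containerd/containerd/log"
	)

	func main() {
		// Attach a field-scoped logger to the context once, near the top of a request.
		ctx := log.WithLogger(context.Background(), log.L.WithField("layer", "sha256:abc"))
		pullLayer(ctx)
	}

	func pullLayer(ctx context.Context) {
		// Deep in the call stack, recover the same entry (and its fields) from the context.
		log.G(ctx).Info("fetching layer")

		// With no logger stored, G falls back to the package-level default entry L.
		log.G(context.Background()).Warn("no scoped logger attached")
	}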

View File

@@ -436,8 +436,9 @@ func importTar(in io.ReaderAt) (*tarFile, error) {
if err != nil {
if err == io.EOF {
break
} else {
return nil, fmt.Errorf("failed to parse tar file, %w", err)
}
return nil, fmt.Errorf("failed to parse tar file, %w", err)
}
switch cleanEntryName(h.Name) {
case PrefetchLandmark, NoPrefetchLandmark:

View File

@@ -10,14 +10,13 @@ import (
"path/filepath"
"strings"
passwd "github.com/containers/common/pkg/password"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/pkg/docker/config"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
"github.com/sirupsen/logrus"
terminal "golang.org/x/term"
)
// ErrNewCredentialsInvalid means that the new user-provided credentials are
@@ -40,46 +39,33 @@ func (e ErrNewCredentialsInvalid) Unwrap() error {
// GetDefaultAuthFile returns env value REGISTRY_AUTH_FILE as default
// --authfile path used in multiple --authfile flag definitions
// Will fail over to DOCKER_CONFIG if REGISTRY_AUTH_FILE environment is not set
//
// WARNINGS:
// - In almost all invocations, expect this function to return ""; so it can not be used
// for directly accessing the file.
// - Use this only for commands that _read_ credentials, not write them.
// The path may refer to github.com/containers auth.json, or to Docker config.json,
// and the distinction is lost; writing auth.json data to config.json may not be consumable by Docker,
// or it may overwrite and discard unrelated Docker configuration set by the user.
func GetDefaultAuthFile() string {
// Keep this in sync with the default logic in systemContextWithOptions!
if authfile := os.Getenv("REGISTRY_AUTH_FILE"); authfile != "" {
return authfile
}
// This pre-existing behavior is not conceptually consistent:
// If users have a ~/.docker/config.json in the default path, and no environment variable
// set, we read auth.json first, falling back to config.json;
// but if DOCKER_CONFIG is set, we read only config.json in that path, and we dont read auth.json at all.
if authEnv := os.Getenv("DOCKER_CONFIG"); authEnv != "" {
return filepath.Join(authEnv, "config.json")
}
return ""
}
// CheckAuthFile validates a path option, failing if the option is set but the referenced file is not accessible.
func CheckAuthFile(pathOption string) error {
if pathOption == "" {
// CheckAuthFile validates filepath given by --authfile
// used by command has --authfile flag
func CheckAuthFile(authfile string) error {
if authfile == "" {
return nil
}
if _, err := os.Stat(pathOption); err != nil {
return fmt.Errorf("credential file is not accessible: %w", err)
if _, err := os.Stat(authfile); err != nil {
return fmt.Errorf("checking authfile: %w", err)
}
return nil
}
// systemContextWithOptions returns a version of sys
// updated with authFile, dockerCompatAuthFile and certDir values (if they are not "").
// updated with authFile and certDir values (if they are not "").
// NOTE: this is a shallow copy that can be used and updated, but may share
// data with the original parameter.
func systemContextWithOptions(sys *types.SystemContext, authFile, dockerCompatAuthFile, certDir string) (*types.SystemContext, error) {
func systemContextWithOptions(sys *types.SystemContext, authFile, certDir string) *types.SystemContext {
if sys != nil {
sysCopy := *sys
sys = &sysCopy
@@ -87,50 +73,24 @@ func systemContextWithOptions(sys *types.SystemContext, authFile, dockerCompatAu
sys = &types.SystemContext{}
}
defaultDockerConfigPath := filepath.Join(homedir.Get(), ".docker", "config.json")
switch {
case authFile != "" && dockerCompatAuthFile != "":
return nil, errors.New("options for paths to the credential file and to the Docker-compatible credential file can not be set simultaneously")
case authFile != "":
if authFile == defaultDockerConfigPath {
logrus.Warn("saving credentials to ~/.docker/config.json, but not using Docker-compatible file format")
}
if authFile != "" {
sys.AuthFilePath = authFile
case dockerCompatAuthFile != "":
sys.DockerCompatAuthFilePath = dockerCompatAuthFile
default:
// Keep this in sync with GetDefaultAuthFile()!
//
// Note that c/image does not natively implement the REGISTRY_AUTH_FILE
// variable, so not all callers look for credentials in this location.
if authFileVar := os.Getenv("REGISTRY_AUTH_FILE"); authFileVar != "" {
if authFileVar == defaultDockerConfigPath {
logrus.Warn("$REGISTRY_AUTH_FILE points to ~/.docker/config.json, but the file format is not fully compatible; use the Docker-compatible file path option instead")
}
sys.AuthFilePath = authFileVar
} else if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
// This preserves pre-existing _inconsistent_ behavior:
// If the Docker configuration exists in the default ~/.docker/config.json location,
// we DO NOT write to it; instead, we update auth.json in the default path.
// Only if the user explicitly sets DOCKER_CONFIG, we write to that config.json.
sys.DockerCompatAuthFilePath = filepath.Join(dockerConfig, "config.json")
}
}
if certDir != "" {
sys.DockerCertPath = certDir
}
return sys, nil
return sys
}
// Login implements a “log in” command with the provided opts and args
// reading the password from opts.Stdin or the options in opts.
func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginOptions, args []string) error {
systemContext, err := systemContextWithOptions(systemContext, opts.AuthFile, opts.DockerCompatAuthFile, opts.CertDir)
if err != nil {
return err
}
systemContext = systemContextWithOptions(systemContext, opts.AuthFile, opts.CertDir)
var key, registry string
var (
key, registry string
err error
)
switch len(args) {
case 0:
if !opts.AcceptUnspecifiedRegistry {
@@ -299,7 +259,7 @@ func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user
if err != nil {
return "", "", fmt.Errorf("reading username: %w", err)
}
// If the user just hit enter, use the displayed user from
// If the user just hit enter, use the displayed user from the
// the authentication file. This allows to do a lazy
// `$ buildah login -p $NEW_PASSWORD` without specifying the
// user.
@@ -309,7 +269,7 @@ func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user
}
if password == "" {
fmt.Fprint(opts.Stdout, "Password: ")
pass, err := passwd.Read(int(os.Stdin.Fd()))
pass, err := terminal.ReadPassword(int(os.Stdin.Fd()))
if err != nil {
return "", "", fmt.Errorf("reading password: %w", err)
}
@@ -324,13 +284,7 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
if err := CheckAuthFile(opts.AuthFile); err != nil {
return err
}
if err := CheckAuthFile(opts.DockerCompatAuthFile); err != nil {
return err
}
systemContext, err := systemContextWithOptions(systemContext, opts.AuthFile, opts.DockerCompatAuthFile, "")
if err != nil {
return err
}
systemContext = systemContextWithOptions(systemContext, opts.AuthFile, "")
if opts.All {
if len(args) != 0 {
@@ -343,7 +297,10 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
return nil
}
var key, registry string
var (
key, registry string
err error
)
switch len(args) {
case 0:
if !opts.AcceptUnspecifiedRegistry {
@@ -379,7 +336,7 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
authInvalid := docker.CheckAuth(context.Background(), systemContext, authConfig.Username, authConfig.Password, registry)
if authConfig.Username != "" && authConfig.Password != "" && authInvalid == nil {
fmt.Printf("Not logged into %s with current tool. Existing credentials were established via docker login. Please use docker logout instead.\n", key) //nolint:forbidigo
fmt.Printf("Not logged into %s with current tool. Existing credentials were established via docker login. Please use docker logout instead.\n", key)
return nil
}
return fmt.Errorf("not logged into %s", key)

View File

@@ -14,15 +14,14 @@ type LoginOptions struct {
// CLI flags managed by the FlagSet returned by GetLoginFlags
// Callers that use GetLoginFlags should not need to touch these values at all; callers that use
// other CLI frameworks should set them based on user input.
AuthFile string
DockerCompatAuthFile string
CertDir string
Password string
Username string
StdinPassword bool
GetLoginSet bool
Verbose bool // set to true for verbose output
AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries
AuthFile string
CertDir string
Password string
Username string
StdinPassword bool
GetLoginSet bool
Verbose bool // set to true for verbose output
AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries
// Options caller can set
Stdin io.Reader // set to os.Stdin
Stdout io.Writer // set to os.Stdout
@@ -35,10 +34,9 @@ type LogoutOptions struct {
// CLI flags managed by the FlagSet returned by GetLogoutFlags
// Callers that use GetLogoutFlags should not need to touch these values at all; callers that use
// other CLI frameworks should set them based on user input.
AuthFile string
DockerCompatAuthFile string
All bool
AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries
AuthFile string
All bool
AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries
// Options caller can set
Stdout io.Writer // set to os.Stdout
AcceptUnspecifiedRegistry bool // set to true if allows logout with unspecified registry
@@ -47,8 +45,7 @@ type LogoutOptions struct {
// GetLoginFlags defines and returns login flags for containers tools
func GetLoginFlags(flags *LoginOptions) *pflag.FlagSet {
fs := pflag.FlagSet{}
fs.StringVar(&flags.AuthFile, "authfile", "", "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
fs.StringVar(&flags.DockerCompatAuthFile, "compat-auth-file", "", "path of a Docker-compatible config file to update instead")
fs.StringVar(&flags.AuthFile, "authfile", GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry")
fs.StringVarP(&flags.Password, "password", "p", "", "Password for registry")
fs.StringVarP(&flags.Username, "username", "u", "", "Username for registry")
@@ -62,7 +59,6 @@ func GetLoginFlags(flags *LoginOptions) *pflag.FlagSet {
func GetLoginFlagsCompletions() completion.FlagCompletions {
flagCompletion := completion.FlagCompletions{}
flagCompletion["authfile"] = completion.AutocompleteDefault
flagCompletion["compat-auth-file"] = completion.AutocompleteDefault
flagCompletion["cert-dir"] = completion.AutocompleteDefault
flagCompletion["password"] = completion.AutocompleteNone
flagCompletion["username"] = completion.AutocompleteNone
@@ -72,8 +68,7 @@ func GetLoginFlagsCompletions() completion.FlagCompletions {
// GetLogoutFlags defines and returns logout flags for containers tools
func GetLogoutFlags(flags *LogoutOptions) *pflag.FlagSet {
fs := pflag.FlagSet{}
fs.StringVar(&flags.AuthFile, "authfile", "", "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
fs.StringVar(&flags.DockerCompatAuthFile, "compat-auth-file", "", "path of a Docker-compatible config file to update instead")
fs.StringVar(&flags.AuthFile, "authfile", GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
fs.BoolVarP(&flags.All, "all", "a", false, "Remove the cached credentials for all registries in the auth file")
return &fs
}
@@ -82,6 +77,5 @@ func GetLogoutFlags(flags *LogoutOptions) *pflag.FlagSet {
func GetLogoutFlagsCompletions() completion.FlagCompletions {
flagCompletion := completion.FlagCompletions{}
flagCompletion["authfile"] = completion.AutocompleteDefault
flagCompletion["compat-auth-file"] = completion.AutocompleteDefault
return flagCompletion
}

View File

@@ -165,7 +165,7 @@ func (ob *optionalIntValue) String() string {
if !ob.present {
return "" // If the value is not present, just return an empty string, any other value wouldn't make sense.
}
return strconv.Itoa(ob.value)
return strconv.Itoa(int(ob.value))
}
// Type returns the int's type.

View File

@@ -1,57 +0,0 @@
//go:build linux || darwin || freebsd
// +build linux darwin freebsd
package password
import (
"errors"
"os"
"os/signal"
"syscall"
terminal "golang.org/x/term"
)
var ErrInterrupt = errors.New("interrupted")
// Read reads a password from the terminal without echo.
func Read(fd int) ([]byte, error) {
// Store and restore the terminal status on interruptions to
// avoid that the terminal remains in the password state
// This is necessary as for https://github.com/golang/go/issues/31180
oldState, err := terminal.GetState(fd)
if err != nil {
return make([]byte, 0), err
}
type Buffer struct {
Buffer []byte
Error error
}
errorChannel := make(chan Buffer, 1)
// SIGINT and SIGTERM restore the terminal, otherwise the no-echo mode would remain intact
interruptChannel := make(chan os.Signal, 1)
signal.Notify(interruptChannel, syscall.SIGINT, syscall.SIGTERM)
defer func() {
signal.Stop(interruptChannel)
close(interruptChannel)
}()
go func() {
for range interruptChannel {
if oldState != nil {
_ = terminal.Restore(fd, oldState)
}
errorChannel <- Buffer{Buffer: make([]byte, 0), Error: ErrInterrupt}
}
}()
go func() {
buf, err := terminal.ReadPassword(fd)
errorChannel <- Buffer{Buffer: buf, Error: err}
}()
buf := <-errorChannel
return buf.Buffer, buf.Error
}
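The point of the signal handling above is that a Ctrl-C during the prompt restores the terminal's echo state before the process exits, instead of leaving the shell in no-echo mode. Callers simply hand Read the stdin descriptor, as the login code elsewhere in this diff does. A minimal standalone sketch illustrating the API shown above (whether the package is present depends on the containers/common version pinned by this repository):

	package main

	import (
		"fmt"
		"os"

		passwd "github.com/containers/common/pkg/password"
	)

	func main() {
		fmt.Fprint(os.Stderr, "Password: ")
		// Read disables echo, restores the terminal on SIGINT/SIGTERM, and returns the raw bytes.
		pass, err := passwd.Read(int(os.Stdin.Fd()))
		fmt.Fprintln(os.Stderr)
		if err != nil {
			fmt.Fprintln(os.Stderr, "reading password:", err)
			os.Exit(1)
		}
		fmt.Fprintf(os.Stderr, "read %d bytes\n", len(pass))
	}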

View File

@@ -1,21 +0,0 @@
//go:build windows
// +build windows
package password
import (
terminal "golang.org/x/term"
)
// Read reads a password from the terminal.
func Read(fd int) ([]byte, error) {
oldState, err := terminal.GetState(fd)
if err != nil {
return make([]byte, 0), err
}
buf, err := terminal.ReadPassword(fd)
if oldState != nil {
_ = terminal.Restore(fd, oldState)
}
return buf, err
}

View File

@@ -51,7 +51,8 @@ func Split(src string) (entries []string) {
}
entries = []string{}
var runes [][]rune
var class, lastClass int
lastClass := 0
class := 0
// split into fields based on class of unicode character
for _, r := range src {
switch {

View File

@@ -74,6 +74,7 @@ func IsErrorRetryable(err error) bool {
}
switch e := err.(type) {
case errcode.Error:
switch e.Code {
case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeDenied,

View File

@@ -83,12 +83,12 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
return types.BlobInfo{}, err
}
// === Report progress using the ic.c.options.Progress channel, if required.
if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 {
// === Report progress using the ic.c.progress channel, if required.
if ic.c.progress != nil && ic.c.progressInterval > 0 {
progressReader := newProgressReader(
stream.reader,
ic.c.options.Progress,
ic.c.options.ProgressInterval,
ic.c.progress,
ic.c.progressInterval,
srcInfo,
)
defer progressReader.reportDone()

View File

@@ -284,24 +284,10 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
}
}
if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
// HACK: Dont record zstd:chunked algorithms.
// There is already a similar hack in internal/imagedestination/impl/helpers.BlobMatchesRequiredCompression,
// and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless.
//
// We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate
// between zstd and zstd:chunked; so we could, in varying situations over time, call RecordDigestCompressorName
// with the same digest and both ZstdAlgorithmName and ZstdChunkedAlgorithmName , which causes warnings about
// inconsistent data to be logged.
c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
}
c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
}
if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest &&
d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression {
if d.srcCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
// HACK: Dont record zstd:chunked algorithms, see above.
c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
}
if srcInfo.Digest != "" && d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
}
return nil
}

View File

@@ -17,7 +17,6 @@ import (
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache"
compression "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/signature/signer"
"github.com/containers/image/v5/transports"
@@ -127,58 +126,36 @@ type Options struct {
// Download layer contents with "nondistributable" media types ("foreign" layers) and translate the layer media type
// to not indicate "nondistributable".
DownloadForeignLayers bool
// Contains slice of OptionCompressionVariant, where copy will ensure that for each platform
// in the manifest list, a variant with the requested compression will exist.
// Invalid when copying a non-multi-architecture image. That will probably
// change in the future.
EnsureCompressionVariantsExist []OptionCompressionVariant
// ForceCompressionFormat ensures that the compression algorithm set in
// DestinationCtx.CompressionFormat is used exclusively, and blobs of other
// compression algorithms are not reused.
ForceCompressionFormat bool
}
// OptionCompressionVariant allows to supply information about
// selected compression algorithm and compression level by the
// end-user. Refer to EnsureCompressionVariantsExist to know
// more about its usage.
type OptionCompressionVariant struct {
Algorithm compression.Algorithm
Level *int // Only used when we are creating a new image instance using the specified algorithm, not when the image already contains such an instance
}
// copier allows us to keep track of diffID values for blobs, and other
// data shared across one or more images in a possible manifest list.
// The owner must call close() when done.
type copier struct {
policyContext *signature.PolicyContext
dest private.ImageDestination
rawSource private.ImageSource
options *Options // never nil
reportWriter io.Writer
progressOutput io.Writer
unparsedToplevel *image.UnparsedImage // for rawSource
dest private.ImageDestination
rawSource private.ImageSource
reportWriter io.Writer
progressOutput io.Writer
progressInterval time.Duration
progress chan types.ProgressProperties
blobInfoCache internalblobinfocache.BlobInfoCache2
ociDecryptConfig *encconfig.DecryptConfig
ociEncryptConfig *encconfig.EncryptConfig
concurrentBlobCopiesSemaphore *semaphore.Weighted // Limits the amount of concurrently copied blobs
signers []*signer.Signer // Signers to use to create new signatures for the image
signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed.
}
// Internal function to validate `requireCompressionFormatMatch` for copySingleImageOptions
func shouldRequireCompressionFormatMatch(options *Options) (bool, error) {
if options.ForceCompressionFormat && (options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil) {
return false, fmt.Errorf("cannot use ForceCompressionFormat with undefined default compression format")
}
return options.ForceCompressionFormat, nil
downloadForeignLayers bool
signers []*signer.Signer // Signers to use to create new signatures for the image
signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed.
}
// Image copies image from srcRef to destRef, using policyContext to validate
// source image admissibility. It returns the manifest which was written to
// the new copy of the image.
func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (copiedManifest []byte, retErr error) {
// NOTE this function uses an output parameter for the error return value.
// Setting this and returning is the ideal way to return an error.
//
// the defers in this routine will wrap the error return with its own errors
// which can be valuable context in the middle of a multi-streamed copy.
if options == nil {
options = &Options{}
}
@@ -232,29 +209,27 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
}
c := &copier{
policyContext: policyContext,
dest: dest,
rawSource: rawSource,
options: options,
reportWriter: reportWriter,
progressOutput: progressOutput,
unparsedToplevel: image.UnparsedInstance(rawSource, nil),
dest: dest,
rawSource: rawSource,
reportWriter: reportWriter,
progressOutput: progressOutput,
progressInterval: options.ProgressInterval,
progress: options.Progress,
// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more).
// Conceptually the cache settings should be in copy.Options instead.
blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)),
// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
// we might want to add a separate CommonCtx — or would that be too confusing?
blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)),
ociDecryptConfig: options.OciDecryptConfig,
ociEncryptConfig: options.OciEncryptConfig,
downloadForeignLayers: options.DownloadForeignLayers,
}
defer c.close()
c.blobInfoCache.Open()
defer c.blobInfoCache.Close()
// Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel.
if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() {
c.concurrentBlobCopiesSemaphore = c.options.ConcurrentBlobCopiesSemaphore
c.concurrentBlobCopiesSemaphore = options.ConcurrentBlobCopiesSemaphore
if c.concurrentBlobCopiesSemaphore == nil {
max := c.options.MaxParallelDownloads
max := options.MaxParallelDownloads
if max == 0 {
max = maxParallelDownloads
}
@@ -262,48 +237,33 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
}
} else {
c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(1))
if c.options.ConcurrentBlobCopiesSemaphore != nil {
if err := c.options.ConcurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
if options.ConcurrentBlobCopiesSemaphore != nil {
if err := options.ConcurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
return nil, fmt.Errorf("acquiring semaphore for concurrent blob copies: %w", err)
}
defer c.options.ConcurrentBlobCopiesSemaphore.Release(1)
defer options.ConcurrentBlobCopiesSemaphore.Release(1)
}
}
if err := c.setupSigners(); err != nil {
if err := c.setupSigners(options); err != nil {
return nil, err
}
multiImage, err := isMultiImage(ctx, c.unparsedToplevel)
unparsedToplevel := image.UnparsedInstance(rawSource, nil)
multiImage, err := isMultiImage(ctx, unparsedToplevel)
if err != nil {
return nil, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(srcRef), err)
}
if !multiImage {
if len(options.EnsureCompressionVariantsExist) > 0 {
return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image")
}
requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options)
if err != nil {
return nil, err
}
// The simple case: just copy a single image.
single, err := c.copySingleImage(ctx, c.unparsedToplevel, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch})
if err != nil {
return nil, err
}
copiedManifest = single.manifest
} else if c.options.ImageListSelection == CopySystemImage {
if len(options.EnsureCompressionVariantsExist) > 0 {
return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image")
}
requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options)
if err != nil {
if copiedManifest, _, _, err = c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedToplevel, nil); err != nil {
return nil, err
}
} else if options.ImageListSelection == CopySystemImage {
// This is a manifest list, and we weren't asked to copy multiple images. Choose a single image that
// matches the current system to copy, and copy it.
mfest, manifestType, err := c.unparsedToplevel.Manifest(ctx)
mfest, manifestType, err := unparsedToplevel.Manifest(ctx)
if err != nil {
return nil, fmt.Errorf("reading manifest for %s: %w", transports.ImageName(srcRef), err)
}
@@ -311,35 +271,34 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
if err != nil {
return nil, fmt.Errorf("parsing primary manifest as list for %s: %w", transports.ImageName(srcRef), err)
}
instanceDigest, err := manifestList.ChooseInstanceByCompression(c.options.SourceCtx, c.options.PreferGzipInstances) // try to pick one that matches c.options.SourceCtx
instanceDigest, err := manifestList.ChooseInstanceByCompression(options.SourceCtx, options.PreferGzipInstances) // try to pick one that matches options.SourceCtx
if err != nil {
return nil, fmt.Errorf("choosing an image from manifest list %s: %w", transports.ImageName(srcRef), err)
}
logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest)
unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)
single, err := c.copySingleImage(ctx, unparsedInstance, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch})
if err != nil {
if copiedManifest, _, _, err = c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, nil); err != nil {
return nil, fmt.Errorf("copying system image from manifest list: %w", err)
}
copiedManifest = single.manifest
} else { /* c.options.ImageListSelection == CopyAllImages or c.options.ImageListSelection == CopySpecificImages, */
} else { /* options.ImageListSelection == CopyAllImages or options.ImageListSelection == CopySpecificImages, */
// If we were asked to copy multiple images and can't, that's an error.
if !supportsMultipleImages(c.dest) {
return nil, fmt.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name())
}
// Copy some or all of the images.
switch c.options.ImageListSelection {
switch options.ImageListSelection {
case CopyAllImages:
logrus.Debugf("Source is a manifest list; copying all instances")
case CopySpecificImages:
logrus.Debugf("Source is a manifest list; copying some instances")
}
if copiedManifest, err = c.copyMultipleImages(ctx); err != nil {
if copiedManifest, err = c.copyMultipleImages(ctx, policyContext, options, unparsedToplevel); err != nil {
return nil, err
}
}
if err := c.dest.Commit(ctx, c.unparsedToplevel); err != nil {
if err := c.dest.Commit(ctx, unparsedToplevel); err != nil {
return nil, fmt.Errorf("committing the finished image: %w", err)
}

View File

@@ -34,7 +34,7 @@ type bpDecryptionStepData struct {
// srcInfo is only used for error messages.
// Returns data for other steps; the caller should eventually use updateCryptoOperation.
func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) {
if !isOciEncrypted(stream.info.MediaType) || ic.c.options.OciDecryptConfig == nil {
if !isOciEncrypted(stream.info.MediaType) || ic.c.ociDecryptConfig == nil {
return &bpDecryptionStepData{
decrypting: false,
}, nil
@@ -47,7 +47,7 @@ func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo
desc := imgspecv1.Descriptor{
Annotations: stream.info.Annotations,
}
reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)
reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.ociDecryptConfig, stream.reader, desc, false)
if err != nil {
return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
}
@@ -70,7 +70,7 @@ func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypt
}
}
// bpEncryptionStepData contains data that the copy pipeline needs about the encryption step.
// bpdData contains data that the copy pipeline needs about the encryption step.
type bpEncryptionStepData struct {
encrypting bool // We are actually encrypting the stream
finalizer ocicrypt.EncryptLayerFinalizer
@@ -81,7 +81,7 @@ type bpEncryptionStepData struct {
// Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations.
func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo,
decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) {
if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.options.OciEncryptConfig == nil {
if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.ociEncryptConfig == nil {
return &bpEncryptionStepData{
encrypting: false,
}, nil
@@ -101,7 +101,7 @@ func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncryp
Size: srcInfo.Size,
Annotations: annotations,
}
reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.options.OciEncryptConfig, stream.reader, desc)
reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.ociEncryptConfig, stream.reader, desc)
if err != nil {
return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err)
}

View File

@@ -5,19 +5,16 @@ import (
"context"
"errors"
"fmt"
"sort"
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/image"
internalManifest "github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/signature"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
@@ -31,134 +28,30 @@ const (
type instanceCopy struct {
op instanceCopyKind
sourceDigest digest.Digest
// Fields which can be used by callers when operation
// is `instanceCopyCopy`
copyForceCompressionFormat bool
// Fields which can be used by callers when operation
// is `instanceCopyClone`
cloneCompressionVariant OptionCompressionVariant
clonePlatform *imgspecv1.Platform
cloneAnnotations map[string]string
}
// internal type only to make imgspecv1.Platform comparable
type platformComparable struct {
architecture string
os string
osVersion string
osFeatures string
variant string
}
// Converts imgspecv1.Platform to a comparable format.
func platformV1ToPlatformComparable(platform *imgspecv1.Platform) platformComparable {
if platform == nil {
return platformComparable{}
}
osFeatures := slices.Clone(platform.OSFeatures)
sort.Strings(osFeatures)
return platformComparable{architecture: platform.Architecture,
os: platform.OS,
// This is strictly speaking ambiguous, fields of OSFeatures can contain a ','. Probably good enough for now.
osFeatures: strings.Join(osFeatures, ","),
osVersion: platform.OSVersion,
variant: platform.Variant,
}
}
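
A stdlib-only sketch of what this conversion buys: two imgspecv1.Platform values that differ only in OSFeatures ordering collapse to the same comparable key, so they can share one map entry. The comparableKey helper and the platform values below are illustrative stand-ins, not the type above.

package main

import (
	"fmt"
	"sort"
	"strings"

	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// comparableKey mimics the idea of platformComparable: sort OSFeatures and
// flatten everything into a single comparable value.
func comparableKey(p imgspecv1.Platform) string {
	features := append([]string{}, p.OSFeatures...)
	sort.Strings(features)
	return strings.Join([]string{p.OS, p.Architecture, p.Variant, p.OSVersion, strings.Join(features, ",")}, "|")
}

func main() {
	a := imgspecv1.Platform{OS: "linux", Architecture: "arm64", OSFeatures: []string{"sse4", "avx"}}
	b := imgspecv1.Platform{OS: "linux", Architecture: "arm64", OSFeatures: []string{"avx", "sse4"}}
	fmt.Println(comparableKey(a) == comparableKey(b)) // true: both index the same map entry
}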
// platformCompressionMap prepares a mapping of platformComparable -> CompressionAlgorithmNames for given digests
func platformCompressionMap(list internalManifest.List, instanceDigests []digest.Digest) (map[platformComparable]*set.Set[string], error) {
res := make(map[platformComparable]*set.Set[string])
for _, instanceDigest := range instanceDigests {
instanceDetails, err := list.Instance(instanceDigest)
if err != nil {
return nil, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err)
}
platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform)
platformSet, ok := res[platform]
if !ok {
platformSet = set.New[string]()
res[platform] = platformSet
}
platformSet.AddSlice(instanceDetails.ReadOnly.CompressionAlgorithmNames)
}
return res, nil
}
func validateCompressionVariantExists(input []OptionCompressionVariant) error {
for _, option := range input {
_, err := compression.AlgorithmByName(option.Algorithm.Name())
if err != nil {
return fmt.Errorf("invalid algorithm %q in option.EnsureCompressionVariantsExist: %w", option.Algorithm.Name(), err)
}
}
return nil
}
// prepareInstanceCopies prepares a list of instances which need to be copied to the manifest list.
func prepareInstanceCopies(list internalManifest.List, instanceDigests []digest.Digest, options *Options) ([]instanceCopy, error) {
func prepareInstanceCopies(instanceDigests []digest.Digest, options *Options) []instanceCopy {
res := []instanceCopy{}
if options.ImageListSelection == CopySpecificImages && len(options.EnsureCompressionVariantsExist) > 0 {
// List can already contain compressed instance for a compression selected in `EnsureCompressionVariantsExist`
// It's unclear what it means when `CopySpecificImages` includes an instance in options.Instances,
// EnsureCompressionVariantsExist asks for an instance with some compression,
// an instance with that compression already exists, but is not included in options.Instances.
// We might define the semantics and implement this in the future.
return res, fmt.Errorf("EnsureCompressionVariantsExist is not implemented for CopySpecificImages")
}
err := validateCompressionVariantExists(options.EnsureCompressionVariantsExist)
if err != nil {
return res, err
}
compressionsByPlatform, err := platformCompressionMap(list, instanceDigests)
if err != nil {
return nil, err
}
for i, instanceDigest := range instanceDigests {
if options.ImageListSelection == CopySpecificImages &&
!slices.Contains(options.Instances, instanceDigest) {
logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
continue
}
instanceDetails, err := list.Instance(instanceDigest)
if err != nil {
return res, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err)
}
forceCompressionFormat, err := shouldRequireCompressionFormatMatch(options)
if err != nil {
return nil, err
}
res = append(res, instanceCopy{
op: instanceCopyCopy,
sourceDigest: instanceDigest,
copyForceCompressionFormat: forceCompressionFormat,
op: instanceCopyCopy,
sourceDigest: instanceDigest,
})
platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform)
compressionList := compressionsByPlatform[platform]
for _, compressionVariant := range options.EnsureCompressionVariantsExist {
if !compressionList.Contains(compressionVariant.Algorithm.Name()) {
res = append(res, instanceCopy{
op: instanceCopyClone,
sourceDigest: instanceDigest,
cloneCompressionVariant: compressionVariant,
clonePlatform: instanceDetails.ReadOnly.Platform,
cloneAnnotations: maps.Clone(instanceDetails.ReadOnly.Annotations),
})
// add current compression to the list so that we don't create duplicate clones
compressionList.Add(compressionVariant.Algorithm.Name())
}
}
}
return res, nil
return res
}
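
The clone entries above are driven entirely by the caller's Options. A minimal sketch of requesting a zstd variant for every platform, assuming the public copy.Options fields that appear on the removed (newer) side of this diff; the literal values are illustrative.

package main

import (
	"fmt"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	// Ask the pipeline to make sure a zstd-compressed instance exists for every
	// platform; prepareInstanceCopies emits an instanceCopyClone for each
	// platform that does not already have one.
	opts := &copy.Options{
		ImageListSelection: copy.CopyAllImages,
		EnsureCompressionVariantsExist: []copy.OptionCompressionVariant{
			{Algorithm: compression.Zstd},
		},
	}
	fmt.Println(len(opts.EnsureCompressionVariantsExist), "compression variant(s) requested")
}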
// copyMultipleImages copies some or all of an image list's instances, using
// c.policyContext to validate source image admissibility.
func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte, retErr error) {
// policyContext to validate source image admissibility.
func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, retErr error) {
// Parse the list and get a copy of the original value after it's re-encoded.
manifestList, manifestType, err := c.unparsedToplevel.Manifest(ctx)
manifestList, manifestType, err := unparsedToplevel.Manifest(ctx)
if err != nil {
return nil, fmt.Errorf("reading manifest list: %w", err)
}
@@ -168,7 +61,7 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte,
}
updatedList := originalList.CloneInternal()
sigs, err := c.sourceSignatures(ctx, c.unparsedToplevel,
sigs, err := c.sourceSignatures(ctx, unparsedToplevel, options,
"Getting image list signatures",
"Checking if image list destination supports signatures")
if err != nil {
@@ -201,12 +94,12 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte,
if destIsDigestedReference {
cannotModifyManifestListReason = "Destination specifies a digest"
}
if c.options.PreserveDigests {
if options.PreserveDigests {
cannotModifyManifestListReason = "Instructed to preserve digests"
}
// Determine if we'll need to convert the manifest list to a different format.
forceListMIMEType := c.options.ForceManifestMIMEType
forceListMIMEType := options.ForceManifestMIMEType
switch forceListMIMEType {
case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
forceListMIMEType = manifest.DockerV2ListMediaType
@@ -226,11 +119,8 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte,
// Copy each image, or just the ones we want to copy, in turn.
instanceDigests := updatedList.Instances()
instanceEdits := []internalManifest.ListEdit{}
instanceCopyList, err := prepareInstanceCopies(updatedList, instanceDigests, c.options)
if err != nil {
return nil, fmt.Errorf("preparing instances for copy: %w", err)
}
c.Printf("Copying %d images generated from %d images in list\n", len(instanceCopyList), len(instanceDigests))
instanceCopyList := prepareInstanceCopies(instanceDigests, options)
c.Printf("Copying %d of %d images in list\n", len(instanceCopyList), len(instanceDigests))
for i, instance := range instanceCopyList {
// Update instances to be edited by their `ListOperation` and
// populate necessary fields.
@@ -239,39 +129,17 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte,
logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList))
c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList))
unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest)
updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{requireCompressionFormatMatch: instance.copyForceCompressionFormat})
updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceCopyList[i].sourceDigest)
if err != nil {
return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err)
}
// Record the result of a possible conversion here.
instanceEdits = append(instanceEdits, internalManifest.ListEdit{
ListOperation: internalManifest.ListOpUpdate,
UpdateOldDigest: instance.sourceDigest,
UpdateDigest: updated.manifestDigest,
UpdateSize: int64(len(updated.manifest)),
UpdateCompressionAlgorithms: updated.compressionAlgorithms,
UpdateMediaType: updated.manifestMIMEType})
case instanceCopyClone:
logrus.Debugf("Replicating instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList))
c.Printf("Replicating image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList))
unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest)
updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{
requireCompressionFormatMatch: true,
compressionFormat: &instance.cloneCompressionVariant.Algorithm,
compressionLevel: instance.cloneCompressionVariant.Level})
if err != nil {
return nil, fmt.Errorf("replicating image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err)
}
// Record the result of a possible conversion here.
instanceEdits = append(instanceEdits, internalManifest.ListEdit{
ListOperation: internalManifest.ListOpAdd,
AddDigest: updated.manifestDigest,
AddSize: int64(len(updated.manifest)),
AddMediaType: updated.manifestMIMEType,
AddPlatform: instance.clonePlatform,
AddAnnotations: instance.cloneAnnotations,
AddCompressionAlgorithms: updated.compressionAlgorithms,
})
ListOperation: internalManifest.ListOpUpdate,
UpdateOldDigest: instance.sourceDigest,
UpdateDigest: updatedManifestDigest,
UpdateSize: int64(len(updatedManifest)),
UpdateMediaType: updatedManifestType})
default:
return nil, fmt.Errorf("copying image: invalid copy operation %d", instance.op)
}
@@ -336,11 +204,11 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte,
}
// Sign the manifest list.
newSigs, err := c.createSignatures(ctx, manifestList, c.options.SignIdentity)
newSigs, err := c.createSignatures(ctx, manifestList, options.SignIdentity)
if err != nil {
return nil, err
}
sigs = append(slices.Clone(sigs), newSigs...)
sigs = append(sigs, newSigs...)
c.Printf("Storing list signatures\n")
if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil {

View File

@@ -84,8 +84,6 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.
),
mpb.AppendDecorators(
decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
decor.Name(" | "),
decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
),
)
}
@@ -96,9 +94,6 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.
mpb.PrependDecorators(
decor.OnComplete(decor.Name(prefix), onComplete),
),
mpb.AppendDecorators(
decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
),
)
}
return &progressBar{

View File

@@ -13,20 +13,20 @@ import (
"github.com/containers/image/v5/transports"
)
// setupSigners initializes c.signers.
func (c *copier) setupSigners() error {
c.signers = append(c.signers, c.options.Signers...)
// c.signersToClose is intentionally not updated with c.options.Signers.
// setupSigners initializes c.signers based on options.
func (c *copier) setupSigners(options *Options) error {
c.signers = append(c.signers, options.Signers...)
// c.signersToClose is intentionally not updated with options.Signers.
// We immediately append created signers to c.signers, and we rely on c.close() to clean them up; so we don't need
// to clean up any created signers on failure.
if c.options.SignBy != "" {
if options.SignBy != "" {
opts := []simplesigning.Option{
simplesigning.WithKeyFingerprint(c.options.SignBy),
simplesigning.WithKeyFingerprint(options.SignBy),
}
if c.options.SignPassphrase != "" {
opts = append(opts, simplesigning.WithPassphrase(c.options.SignPassphrase))
if options.SignPassphrase != "" {
opts = append(opts, simplesigning.WithPassphrase(options.SignPassphrase))
}
signer, err := simplesigning.NewSigner(opts...)
if err != nil {
@@ -36,9 +36,9 @@ func (c *copier) setupSigners() error {
c.signersToClose = append(c.signersToClose, signer)
}
if c.options.SignBySigstorePrivateKeyFile != "" {
if options.SignBySigstorePrivateKeyFile != "" {
signer, err := sigstore.NewSigner(
sigstore.WithPrivateKeyFile(c.options.SignBySigstorePrivateKeyFile, c.options.SignSigstorePrivateKeyPassphrase),
sigstore.WithPrivateKeyFile(options.SignBySigstorePrivateKeyFile, options.SignSigstorePrivateKeyPassphrase),
)
if err != nil {
return err
@@ -50,13 +50,13 @@ func (c *copier) setupSigners() error {
return nil
}
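
For reference, a hedged sketch of constructing the simple-signing signer that setupSigners wires up; the fingerprint and passphrase are placeholders, and only the option names visible in the hunk above are relied on.

package main

import (
	"fmt"

	"github.com/containers/image/v5/signature/simplesigning"
)

func main() {
	opts := []simplesigning.Option{
		simplesigning.WithKeyFingerprint("0123456789ABCDEF0123456789ABCDEF01234567"), // placeholder fingerprint
	}
	passphrase := "" // placeholder; only added when non-empty, mirroring setupSigners
	if passphrase != "" {
		opts = append(opts, simplesigning.WithPassphrase(passphrase))
	}
	signer, err := simplesigning.NewSigner(opts...)
	if err != nil {
		fmt.Println("creating signer:", err)
		return
	}
	defer signer.Close()
	fmt.Println("signer ready; hand it to the copy code via its Signers option")
}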
// sourceSignatures returns signatures from unparsedSource,
// sourceSignatures returns signatures from unparsedSource based on options,
// and verifies that they can be used (to avoid copying a large image when we
// can tell in advance that it would ultimately fail)
func (c *copier) sourceSignatures(ctx context.Context, unparsed private.UnparsedImage,
func (c *copier) sourceSignatures(ctx context.Context, unparsed private.UnparsedImage, options *Options,
gettingSignaturesMessage, checkingDestMessage string) ([]internalsig.Signature, error) {
var sigs []internalsig.Signature
if c.options.RemoveSignatures {
if options.RemoveSignatures {
sigs = []internalsig.Signature{}
} else {
c.Printf("%s\n", gettingSignaturesMessage)

View File

@@ -18,6 +18,7 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
@@ -29,54 +30,40 @@ import (
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
type imageCopier struct {
c *copier
manifestUpdates *types.ManifestUpdateOptions
src *image.SourcedImage
diffIDsAreNeeded bool
cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
canSubstituteBlobs bool
compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil.
compressionLevel *int
requireCompressionFormatMatch bool
c *copier
manifestUpdates *types.ManifestUpdateOptions
src *image.SourcedImage
diffIDsAreNeeded bool
cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
canSubstituteBlobs bool
compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil.
compressionLevel *int
ociEncryptLayers *[]int
}
type copySingleImageOptions struct {
requireCompressionFormatMatch bool
compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil.
compressionLevel *int
}
// copySingleImageResult carries data produced by copySingleImage
type copySingleImageResult struct {
manifest []byte
manifestMIMEType string
manifestDigest digest.Digest
compressionAlgorithms []compressiontypes.Algorithm
}
// copySingleImage copies a single (non-manifest-list) image unparsedImage, using c.policyContext to validate
// copySingleImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
// source image admissibility.
func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest, opts copySingleImageOptions) (copySingleImageResult, error) {
func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest) (retManifest []byte, retManifestType string, retManifestDigest digest.Digest, retErr error) {
// The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
// Make sure we fail cleanly in such cases.
multiImage, err := isMultiImage(ctx, unparsedImage)
if err != nil {
// FIXME FIXME: How to name a reference for the sub-image?
return copySingleImageResult{}, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err)
return nil, "", "", fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err)
}
if multiImage {
return copySingleImageResult{}, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
}
// Please keep this policy check BEFORE reading any other information about the image.
// (The multiImage check above only matches the MIME type, which we have received anyway.
// Actual parsing of anything should be deferred.)
if allowed, err := c.policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
return copySingleImageResult{}, fmt.Errorf("Source image rejected: %w", err)
if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
return nil, "", "", fmt.Errorf("Source image rejected: %w", err)
}
src, err := image.FromUnparsedImage(ctx, c.options.SourceCtx, unparsedImage)
src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
if err != nil {
return copySingleImageResult{}, fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err)
return nil, "", "", fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err)
}
// If the destination is a digested reference, make a note of that, determine what digest value we're
@@ -88,33 +75,33 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
destIsDigestedReference = true
matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest())
if err != nil {
return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err)
return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err)
}
if !matches {
manifestList, _, err := c.unparsedToplevel.Manifest(ctx)
manifestList, _, err := unparsedToplevel.Manifest(ctx)
if err != nil {
return copySingleImageResult{}, fmt.Errorf("reading manifest from source image: %w", err)
return nil, "", "", fmt.Errorf("reading manifest from source image: %w", err)
}
matches, err = manifest.MatchesDigest(manifestList, digested.Digest())
if err != nil {
return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err)
return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err)
}
if !matches {
return copySingleImageResult{}, errors.New("Digest of source image's manifest would not match destination reference")
return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference")
}
}
}
}
if err := checkImageDestinationForCurrentRuntime(ctx, c.options.DestinationCtx, src, c.dest); err != nil {
return copySingleImageResult{}, err
if err := checkImageDestinationForCurrentRuntime(ctx, options.DestinationCtx, src, c.dest); err != nil {
return nil, "", "", err
}
sigs, err := c.sourceSignatures(ctx, src,
sigs, err := c.sourceSignatures(ctx, src, options,
"Getting image source signatures",
"Checking if image destination supports signatures")
if err != nil {
return copySingleImageResult{}, err
return nil, "", "", err
}
// Determine if we're allowed to modify the manifest.
@@ -127,7 +114,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
if destIsDigestedReference {
cannotModifyManifestReason = "Destination specifies a digest"
}
if c.options.PreserveDigests {
if options.PreserveDigests {
cannotModifyManifestReason = "Instructed to preserve digests"
}
@@ -136,16 +123,13 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
src: src,
// diffIDsAreNeeded is computed later
cannotModifyManifestReason: cannotModifyManifestReason,
requireCompressionFormatMatch: opts.requireCompressionFormatMatch,
cannotModifyManifestReason: cannotModifyManifestReason,
ociEncryptLayers: options.OciEncryptLayers,
}
if opts.compressionFormat != nil {
ic.compressionFormat = opts.compressionFormat
ic.compressionLevel = opts.compressionLevel
} else if c.options.DestinationCtx != nil {
if options.DestinationCtx != nil {
// Note that compressionFormat and compressionLevel can be nil.
ic.compressionFormat = c.options.DestinationCtx.CompressionFormat
ic.compressionLevel = c.options.DestinationCtx.CompressionLevel
ic.compressionFormat = options.DestinationCtx.CompressionFormat
ic.compressionLevel = options.DestinationCtx.CompressionLevel
}
// Decide whether we can substitute blobs with semantic equivalents:
// - Don't do that if we can't modify the manifest at all
@@ -158,20 +142,20 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && len(c.signers) == 0
if err := ic.updateEmbeddedDockerReference(); err != nil {
return copySingleImageResult{}, err
return nil, "", "", err
}
destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig == nil) || c.options.OciEncryptLayers != nil
destRequiresOciEncryption := (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || options.OciEncryptLayers != nil
manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{
srcMIMEType: ic.src.ManifestMIMEType,
destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(),
forceManifestMIMEType: c.options.ForceManifestMIMEType,
forceManifestMIMEType: options.ForceManifestMIMEType,
requiresOCIEncryption: destRequiresOciEncryption,
cannotModifyManifestReason: ic.cannotModifyManifestReason,
})
if err != nil {
return copySingleImageResult{}, err
return nil, "", "", err
}
// We set up this part of ic.manifestUpdates quite early, not just around the
// code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code
@@ -185,28 +169,27 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
// If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal
if c.options.OptimizeDestinationImageAlreadyExists {
if options.OptimizeDestinationImageAlreadyExists {
shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
noPendingManifestUpdates := ic.noPendingManifestUpdates()
logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t, compression match required for resuing blobs=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates, opts.requireCompressionFormatMatch)
if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates && !ic.requireCompressionFormatMatch {
matchedResult, err := ic.compareImageDestinationManifestEqual(ctx, targetInstance)
logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates)
if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates {
isSrcDestManifestEqual, retManifest, retManifestType, retManifestDigest, err := compareImageDestinationManifestEqual(ctx, options, src, targetInstance, c.dest)
if err != nil {
logrus.Warnf("Failed to compare destination image manifest: %v", err)
return copySingleImageResult{}, err
return nil, "", "", err
}
if matchedResult != nil {
if isSrcDestManifestEqual {
c.Printf("Skipping: image already present at destination\n")
return *matchedResult, nil
return retManifest, retManifestType, retManifestDigest, nil
}
}
}
compressionAlgos, err := ic.copyLayers(ctx)
if err != nil {
return copySingleImageResult{}, err
if err := ic.copyLayers(ctx); err != nil {
return nil, "", "", err
}
// With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
@@ -214,12 +197,8 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
// without actually trying to upload something and getting a types.ManifestTypeRejectedError.
// So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if
// we're altering how they're compressed. If the process succeeds, fine…
manifestBytes, manifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
wipResult := copySingleImageResult{
manifest: manifestBytes,
manifestMIMEType: manifestConversionPlan.preferredMIMEType,
manifestDigest: manifestDigest,
}
manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
retManifestType = manifestConversionPlan.preferredMIMEType
if err != nil {
logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err)
// … if it fails, and the failure is either because the manifest is rejected by the registry, or
@@ -234,14 +213,14 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
// We don't have other options.
// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
// Don't bother the user with MIME types if we have no choice.
return copySingleImageResult{}, err
return nil, "", "", err
}
// If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType.
// So if we are here, we will definitely be trying to convert the manifest.
// With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason,
// so let's bail out early and with a better error message.
if ic.cannotModifyManifestReason != "" {
return copySingleImageResult{}, fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err)
return nil, "", "", fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err)
}
// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
@@ -257,37 +236,34 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
}
// We have successfully uploaded a manifest.
wipResult = copySingleImageResult{
manifest: attemptedManifest,
manifestMIMEType: manifestMIMEType,
manifestDigest: attemptedManifestDigest,
}
manifestBytes = attemptedManifest
retManifestDigest = attemptedManifestDigest
retManifestType = manifestMIMEType
errs = nil // Mark this as a success so that we don't abort below.
break
}
if errs != nil {
return copySingleImageResult{}, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
return nil, "", "", fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
}
}
if targetInstance != nil {
targetInstance = &wipResult.manifestDigest
targetInstance = &retManifestDigest
}
newSigs, err := c.createSignatures(ctx, wipResult.manifest, c.options.SignIdentity)
newSigs, err := c.createSignatures(ctx, manifestBytes, options.SignIdentity)
if err != nil {
return copySingleImageResult{}, err
return nil, "", "", err
}
sigs = append(slices.Clone(sigs), newSigs...)
sigs = append(sigs, newSigs...)
if len(sigs) > 0 {
c.Printf("Storing signatures\n")
if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
return copySingleImageResult{}, fmt.Errorf("writing signatures: %w", err)
return nil, "", "", fmt.Errorf("writing signatures: %w", err)
}
}
wipResult.compressionAlgorithms = compressionAlgos
res := wipResult // We are done
return res, nil
return manifestBytes, retManifestType, retManifestDigest, nil
}
// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary.
@@ -305,18 +281,18 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
options := newOrderedSet()
match := false
for _, wantedPlatform := range wantedPlatforms {
// For a transitional period, this might trigger warnings because the Variant
// field was added to OCI config only recently. If this turns out to be too noisy,
// revert this check to only look for (OS, Architecture).
if platform.MatchesPlatform(c.Platform, wantedPlatform) {
// Waiting for https://github.com/opencontainers/image-spec/pull/777 :
// This currently can't use image.MatchesPlatform because we don't know what to use
// for image.Variant.
if wantedPlatform.OS == c.OS && wantedPlatform.Architecture == c.Architecture {
match = true
break
}
options.append(fmt.Sprintf("%s+%s+%q", wantedPlatform.OS, wantedPlatform.Architecture, wantedPlatform.Variant))
options.append(fmt.Sprintf("%s+%s", wantedPlatform.OS, wantedPlatform.Architecture))
}
if !match {
logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q",
c.OS, c.Architecture, c.Variant, strings.Join(options.list, ", "))
logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q, expecting one of %q",
c.OS, c.Architecture, strings.Join(options.list, ", "))
}
}
return nil
@@ -347,70 +323,52 @@ func (ic *imageCopier) noPendingManifestUpdates() bool {
return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly})
}
// compareImageDestinationManifestEqual compares the source and destination image manifests (reading the manifest from the
// (possibly remote) destination). If they are equal, it returns a full copySingleImageResult, nil otherwise.
func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, targetInstance *digest.Digest) (*copySingleImageResult, error) {
srcManifestDigest, err := manifest.Digest(ic.src.ManifestBlob)
// compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the
// (possibly remote) destination). It returns true and the destination's manifest, type and digest if they compare equal.
func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src *image.SourcedImage, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
srcManifestDigest, err := manifest.Digest(src.ManifestBlob)
if err != nil {
return nil, fmt.Errorf("calculating manifest digest: %w", err)
return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err)
}
destImageSource, err := ic.c.dest.Reference().NewImageSource(ctx, ic.c.options.DestinationCtx)
destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx)
if err != nil {
logrus.Debugf("Unable to create destination image %s source: %v", ic.c.dest.Reference(), err)
return nil, nil
logrus.Debugf("Unable to create destination image %s source: %v", dest.Reference(), err)
return false, nil, "", "", nil
}
defer destImageSource.Close()
destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance)
if err != nil {
logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err)
return nil, nil
return false, nil, "", "", nil
}
destManifestDigest, err := manifest.Digest(destManifest)
if err != nil {
return nil, fmt.Errorf("calculating manifest digest: %w", err)
return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err)
}
logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest)
if srcManifestDigest != destManifestDigest {
return nil, nil
}
compressionAlgos := set.New[string]()
for _, srcInfo := range ic.src.LayerInfos() {
if c := compressionAlgorithmFromMIMEType(srcInfo); c != nil {
compressionAlgos.Add(c.Name())
}
}
algos, err := algorithmsByNames(compressionAlgos.Values())
if err != nil {
return nil, err
return false, nil, "", "", nil
}
// Destination and source manifests, types and digests should all be equivalent
return &copySingleImageResult{
manifest: destManifest,
manifestMIMEType: destManifestType,
manifestDigest: srcManifestDigest,
compressionAlgorithms: algos,
}, nil
return true, destManifest, destManifestType, destManifestDigest, nil
}
// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "".
func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algorithm, error) {
func (ic *imageCopier) copyLayers(ctx context.Context) error {
srcInfos := ic.src.LayerInfos()
numLayers := len(srcInfos)
updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
if err != nil {
return nil, err
return err
}
srcInfosUpdated := false
if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
if ic.cannotModifyManifestReason != "" {
return nil, fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason)
return fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason)
}
srcInfos = updatedSrcInfos
srcInfosUpdated = true
@@ -426,7 +384,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
// layer is empty.
man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType)
if err != nil {
return nil, err
return err
}
manifestLayerInfos := man.LayerInfos()
@@ -438,7 +396,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
defer copyGroup.Done()
cld := copyLayerData{}
if !ic.c.options.DownloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
if !ic.c.downloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
// DiffIDs are, currently, needed only when converting from schema1.
// In which case src.LayerInfos will not have URLs because schema1
// does not support them.
@@ -457,18 +415,12 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
// Decide which layers to encrypt
layersToEncrypt := set.New[int]()
var encryptAll bool
if ic.c.options.OciEncryptLayers != nil {
encryptAll = len(*ic.c.options.OciEncryptLayers) == 0
if ic.ociEncryptLayers != nil {
encryptAll = len(*ic.ociEncryptLayers) == 0
totalLayers := len(srcInfos)
for _, l := range *ic.c.options.OciEncryptLayers {
switch {
case l >= 0 && l < totalLayers:
layersToEncrypt.Add(l)
case l < 0 && l+totalLayers >= 0: // Implies (l + totalLayers) < totalLayers
layersToEncrypt.Add(l + totalLayers) // If l is negative, it is reverse indexed.
default:
return nil, fmt.Errorf("when choosing layers to encrypt, layer index %d out of range (%d layers exist)", l, totalLayers)
}
for _, l := range *ic.ociEncryptLayers {
// if layer is negative, it is reverse indexed.
layersToEncrypt.Add((totalLayers + l) % totalLayers)
}
if encryptAll {
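
The two sides of this hunk treat negative entries in OciEncryptLayers differently: the code kept by this change maps them with (totalLayers + l) % totalLayers, while the removed (newer) code range-checks them and errors out. A stdlib-only sketch of the difference for a hypothetical five-layer image:

package main

import "fmt"

func main() {
	totalLayers := 5
	for _, l := range []int{-1, -5, -6, 4, 5} {
		// Kept behaviour: modular arithmetic. -1 selects layer 4 and -5 selects
		// layer 0, but -6 yields -1 (matches nothing) and 5 wraps around to 0.
		oldIndex := (totalLayers + l) % totalLayers
		// Removed (newer) behaviour: only l in [-totalLayers, totalLayers) is
		// accepted; anything else is reported as an out-of-range error.
		newerAccepts := (l >= 0 && l < totalLayers) || (l < 0 && l+totalLayers >= 0)
		fmt.Printf("l=%2d kept-index=%2d newer-accepts=%v\n", l, oldIndex, newerAccepts)
	}
}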
@@ -498,18 +450,14 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
// A call to copyGroup.Wait() is done at this point by the defer above.
return nil
}(); err != nil {
return nil, err
return err
}
compressionAlgos := set.New[string]()
destInfos := make([]types.BlobInfo, numLayers)
diffIDs := make([]digest.Digest, numLayers)
for i, cld := range data {
if cld.err != nil {
return nil, cld.err
}
if cld.destInfo.CompressionAlgorithm != nil {
compressionAlgos.Add(cld.destInfo.CompressionAlgorithm.Name())
return cld.err
}
destInfos[i] = cld.destInfo
diffIDs[i] = cld.diffID
@@ -524,11 +472,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
ic.manifestUpdates.LayerInfos = destInfos
}
algos, err := algorithmsByNames(compressionAlgos.Values())
if err != nil {
return nil, err
}
return algos, nil
return nil
}
// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields)
@@ -633,19 +577,6 @@ type diffIDResult struct {
err error
}
func compressionAlgorithmFromMIMEType(srcInfo types.BlobInfo) *compressiontypes.Algorithm {
// This MIME type → compression mapping belongs in manifest-specific code in our manifest
// package (but we should preferably replace/change UpdatedImage instead of productizing
// this workaround).
switch srcInfo.MediaType {
case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
return &compression.Gzip
case imgspecv1.MediaTypeImageLayerZstd:
return &compression.Zstd
}
return nil
}
// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
// srcRef can be used as an additional hint to the destination during checking whether a layer can be reused but srcRef can be nil.
@@ -657,22 +588,27 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// which uses the compression information to compute the updated MediaType values.
// (Sadly UpdatedImage() is documented to not update MediaTypes from
// ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
//
// This MIME type → compression mapping belongs in manifest-specific code in our manifest
// package (but we should preferably replace/change UpdatedImage instead of productizing
// this workaround).
if srcInfo.CompressionAlgorithm == nil {
srcInfo.CompressionAlgorithm = compressionAlgorithmFromMIMEType(srcInfo)
switch srcInfo.MediaType {
case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
srcInfo.CompressionAlgorithm = &compression.Gzip
case imgspecv1.MediaTypeImageLayerZstd:
srcInfo.CompressionAlgorithm = &compression.Zstd
}
}
ic.c.printCopyInfo("blob", srcInfo)
diffIDIsNeeded := false
var cachedDiffID digest.Digest = ""
if ic.diffIDsAreNeeded {
cachedDiffID = ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
diffIDIsNeeded = cachedDiffID == ""
}
cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
// When encrypting to decrypting, only use the simple code path. We might be able to optimize more
// (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
// but it's not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let's not.
encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.options.OciDecryptConfig != nil)
encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting
// Don't read the layer from the source if we already have the blob, and optimizations are acceptable.
@@ -687,20 +623,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
// Fixing that will probably require passing more information to TryReusingBlob() than the current version of
// the ImageDestination interface lets us pass in.
var requiredCompression *compressiontypes.Algorithm
var originalCompression *compressiontypes.Algorithm
if ic.requireCompressionFormatMatch {
requiredCompression = ic.compressionFormat
originalCompression = srcInfo.CompressionAlgorithm
}
reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
Cache: ic.c.blobInfoCache,
CanSubstitute: canSubstitute,
EmptyLayer: emptyLayer,
LayerIndex: &layerIndex,
SrcRef: srcRef,
RequiredCompression: requiredCompression,
OriginalCompression: originalCompression,
Cache: ic.c.blobInfoCache,
CanSubstitute: canSubstitute,
EmptyLayer: emptyLayer,
LayerIndex: &layerIndex,
SrcRef: srcRef,
})
if err != nil {
return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
@@ -714,8 +642,8 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
}()
// Throw an event that the layer has been skipped
if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 {
ic.c.options.Progress <- types.ProgressProperties{
if ic.c.progress != nil && ic.c.progressInterval > 0 {
ic.c.progress <- types.ProgressProperties{
Event: types.ProgressEventSkipped,
Artifact: srcInfo,
}
@@ -744,9 +672,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
if err == nil {
if srcInfo.Size != -1 {
refill := srcInfo.Size - bar.Current()
bar.SetCurrent(srcInfo.Size)
bar.SetRefill(refill)
bar.SetRefill(srcInfo.Size - bar.Current())
}
bar.mark100PercentComplete()
hideProgressBar = false
@@ -892,16 +818,3 @@ func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorF
return digest.Canonical.FromReader(stream)
}
// algorithmsByNames returns slice of Algorithms from slice of Algorithm Names
func algorithmsByNames(names []string) ([]compressiontypes.Algorithm, error) {
result := []compressiontypes.Algorithm{}
for _, name := range names {
algo, err := compression.AlgorithmByName(name)
if err != nil {
return nil, err
}
result = append(result, algo)
}
return result, nil
}
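
algorithmsByNames, removed here, is a thin wrapper over the public lookup in pkg/compression; a minimal sketch of that lookup, with one deliberately bogus name:

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	for _, name := range []string{"gzip", "zstd", "no-such-algorithm"} {
		algo, err := compression.AlgorithmByName(name)
		if err != nil {
			fmt.Printf("%s: %v\n", name, err) // unknown names return an error
			continue
		}
		fmt.Printf("%s resolves to %s\n", name, algo.Name())
	}
}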

View File

@@ -190,9 +190,6 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if !impl.OriginalBlobMatchesRequiredCompression(options) {
return false, private.ReusedBlob{}, nil
}
if info.Digest == "" {
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with unknown digest")
}

View File

@@ -2,7 +2,6 @@ package daemon
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
@@ -86,40 +85,12 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe
}
}()
err = imageLoad(ctx, c, reader)
}
// imageLoad accepts tar stream on reader and sends it to c
func imageLoad(ctx context.Context, c *client.Client, reader *io.PipeReader) error {
resp, err := c.ImageLoad(ctx, reader, true)
if err != nil {
return fmt.Errorf("starting a load operation in docker engine: %w", err)
err = fmt.Errorf("saving image to docker engine: %w", err)
return
}
defer resp.Body.Close()
// jsonError and jsonMessage are small subsets of docker/docker/pkg/jsonmessage.JSONError and JSONMessage,
// copied here to minimize dependencies.
type jsonError struct {
Message string `json:"message,omitempty"`
}
type jsonMessage struct {
Error *jsonError `json:"errorDetail,omitempty"`
}
dec := json.NewDecoder(resp.Body)
for {
var msg jsonMessage
if err := dec.Decode(&msg); err != nil {
if err == io.EOF {
break
}
return fmt.Errorf("parsing docker load progress: %w", err)
}
if msg.Error != nil {
return fmt.Errorf("docker engine reported: %s", msg.Error.Message)
}
}
return nil // No error reported = success
}
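
The decoding loop above only inspects errorDetail; a self-contained sketch of the same parsing against a canned response body (the payload is made up, the field names follow docker's jsonmessage stream as in the code above):

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

func main() {
	// A fabricated /images/load response body: one progress message, one error.
	body := strings.NewReader(`{"stream":"Loaded image: busybox:latest\n"}
{"errorDetail":{"message":"open /var/lib/docker: permission denied"}}`)

	type jsonError struct {
		Message string `json:"message,omitempty"`
	}
	type jsonMessage struct {
		Error *jsonError `json:"errorDetail,omitempty"`
	}

	dec := json.NewDecoder(body)
	for {
		var msg jsonMessage
		if err := dec.Decode(&msg); err != nil {
			if err == io.EOF {
				break
			}
			fmt.Println("parsing docker load progress:", err)
			return
		}
		if msg.Error != nil {
			fmt.Println("docker engine reported:", msg.Error.Message)
			return
		}
	}
	fmt.Println("load succeeded")
}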
// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved

View File

@@ -24,7 +24,6 @@ import (
"github.com/docker/distribution/registry/api/errcode"
dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
"golang.org/x/exp/slices"
)
// errNoErrorsInBody is returned when an HTTP response body parses to an empty
@@ -106,7 +105,7 @@ func makeErrorList(err error) []error {
}
func mergeErrors(err1, err2 error) error {
return errcode.Errors(append(slices.Clone(makeErrorList(err1)), makeErrorList(err2)...))
return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
}
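
The newer side of this hunk clones the first error list before appending. Without the clone, append may write into err1's backing array, so a later append through the original slice can clobber the merged result. A small illustration of that aliasing with toy slices (the error values are made up):

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	base := make([]error, 1, 4) // spare capacity, like a slice someone else still holds
	base[0] = fmt.Errorf("first")

	aliased := append(base, fmt.Errorf("second"))               // may share base's backing array
	cloned := append(slices.Clone(base), fmt.Errorf("second")) // always a fresh array

	// A later append through the original slice can clobber aliased[1], but not cloned[1].
	_ = append(base, fmt.Errorf("overwrite"))
	fmt.Println(aliased[1]) // "overwrite" (shared backing array)
	fmt.Println(cloned[1])  // "second"
}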
// handleErrorResponse returns error parsed from HTTP response for an

View File

@@ -1,6 +1,7 @@
package docker
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
@@ -18,7 +19,6 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/internal/useragent"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/docker/config"
@@ -121,9 +121,6 @@ type dockerClient struct {
// Private state for detectProperties:
detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
detectPropertiesError error // detectPropertiesError caches the initial error.
// Private state for logResponseWarnings
reportedWarningsLock sync.Mutex
reportedWarnings *set.Set[string]
}
type authScope struct {
@@ -284,11 +281,10 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
}
return &dockerClient{
sys: sys,
registry: registry,
userAgent: userAgent,
tlsClientConfig: tlsClientConfig,
reportedWarnings: set.New[string](),
sys: sys,
registry: registry,
userAgent: userAgent,
tlsClientConfig: tlsClientConfig,
}, nil
}
@@ -363,11 +359,6 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
hostname := registry
if registry == dockerHostname {
hostname = dockerV1Hostname
// A search term of library/foo does not find the library/foo image on the docker.io servers,
// which is surprising - and that Docker is modifying the search term client-side this same way,
// and it seems convenient to do the same thing.
// Read more here: https://github.com/containers/image/pull/2133#issue-1928524334
image = strings.TrimPrefix(image, "library/")
}
client, err := newDockerClient(sys, hostname, registry)
@@ -633,76 +624,9 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method
if err != nil {
return nil, err
}
if warnings := res.Header.Values("Warning"); len(warnings) != 0 {
c.logResponseWarnings(res, warnings)
}
return res, nil
}
// logResponseWarnings logs warningHeaders from res, if any.
func (c *dockerClient) logResponseWarnings(res *http.Response, warningHeaders []string) {
c.reportedWarningsLock.Lock()
defer c.reportedWarningsLock.Unlock()
for _, header := range warningHeaders {
warningString := parseRegistryWarningHeader(header)
if warningString == "" {
logrus.Debugf("Ignored Warning: header from registry: %q", header)
} else {
if !c.reportedWarnings.Contains(warningString) {
c.reportedWarnings.Add(warningString)
// Note that reportedWarnings is based only on warningString, so that we don't
// repeat the same warning for every request - but the warning includes the URL;
// so it may not be specific to that URL.
logrus.Warnf("Warning from registry (first encountered at %q): %q", res.Request.URL.Redacted(), warningString)
} else {
logrus.Debugf("Repeated warning from registry at %q: %q", res.Request.URL.Redacted(), warningString)
}
}
}
}
// parseRegistryWarningHeader parses a Warning: header per RFC 7234, limited to the warning
// values allowed by opencontainers/distribution-spec.
// It returns the warning string if the header has the expected format, or "" otherwise.
func parseRegistryWarningHeader(header string) string {
const expectedPrefix = `299 - "`
const expectedSuffix = `"`
// warning-value = warn-code SP warn-agent SP warn-text [ SP warn-date ]
// distribution-spec requires warn-code=299, warn-agent="-", warn-date missing
if !strings.HasPrefix(header, expectedPrefix) || !strings.HasSuffix(header, expectedSuffix) {
return ""
}
header = header[len(expectedPrefix) : len(header)-len(expectedSuffix)]
// ”Recipients that process the value of a quoted-string MUST handle a quoted-pair
// as if it were replaced by the octet following the backslash.”, so let's do that…
res := strings.Builder{}
afterBackslash := false
for _, c := range []byte(header) { // []byte because escaping is defined in terms of bytes, not Unicode code points
switch {
case c == 0x7F || (c < ' ' && c != '\t'):
return "" // Control characters are forbidden
case afterBackslash:
res.WriteByte(c)
afterBackslash = false
case c == '"':
// This terminates the warn-text and warn-date, forbidden by distribution-spec, follows,
// or completely invalid input.
return ""
case c == '\\':
afterBackslash = true
default:
res.WriteByte(c)
}
}
if afterBackslash {
return ""
}
return res.String()
}
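
parseRegistryWarningHeader, removed here, accepts only the narrow distribution-spec shape of an RFC 7234 Warning header. A simplified stdlib-only sketch of that shape; the warningText helper is an illustrative stand-in that skips the quoted-pair unescaping and control-character checks the real function performs:

package main

import (
	"fmt"
	"strings"
)

// warningText extracts warn-text from a distribution-spec Warning header,
// which must look like: 299 - "some text".
func warningText(header string) string {
	const prefix, suffix = `299 - "`, `"`
	if !strings.HasPrefix(header, prefix) || !strings.HasSuffix(header, suffix) {
		return ""
	}
	return header[len(prefix) : len(header)-len(suffix)]
}

func main() {
	fmt.Printf("%q\n", warningText(`299 - "this registry is deprecated"`)) // accepted
	fmt.Printf("%q\n", warningText(`199 old-agent "other warning"`))       // rejected: wrong warn-code/agent
}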
// we're using the challenges from the /v2/ ping response and not the one from the destination
// URL in this request because:
//
@@ -1084,10 +1008,9 @@ func isManifestUnknownError(err error) bool {
if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" {
return true
}
// opencontainers/distribution-spec does not require the errcode.Error payloads to be used,
// but specifies that the HTTP status must be 404.
// ALSO registry.redhat.io as of October 2022
var unexpected *unexpectedHTTPResponseError
if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound {
if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound && bytes.Contains(unexpected.Response, []byte("Not found")) {
return true
}
return false

View File

@@ -137,7 +137,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
// If requested, precompute the blob digest to prevent uploading layers that already exist on the registry.
// This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests,
// the source blob is uncompressed, and the destination blob is being compressed "on the fly".
if inputInfo.Digest == "" && d.c.sys != nil && d.c.sys.DockerRegistryPushPrecomputeDigests {
if inputInfo.Digest == "" && d.c.sys.DockerRegistryPushPrecomputeDigests {
logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref))
streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo)
if err != nil {
@@ -321,78 +321,33 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
}
if impl.OriginalBlobMatchesRequiredCompression(options) {
// First, check whether the blob happens to already exist at the destination.
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
if err != nil {
return false, private.ReusedBlob{}, err
}
if haveBlob {
return true, reusedInfo, nil
}
} else {
requiredCompression := "nil"
if options.OriginalCompression != nil {
requiredCompression = options.OriginalCompression.Name()
}
logrus.Debugf("Ignoring exact blob match case due to compression mismatch ( %s vs %s )", options.RequiredCompression.Name(), requiredCompression)
// First, check whether the blob happens to already exist at the destination.
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
if err != nil {
return false, private.ReusedBlob{}, err
}
if haveBlob {
return true, reusedInfo, nil
}
// Then try reusing blobs from other locations.
candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, options.CanSubstitute)
for _, candidate := range candidates {
var err error
compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
candidateRepo, err := parseBICLocationReference(candidate.Location)
if err != nil {
logrus.Debugf("OperationAndAlgorithmForCompressor Failed: %v", err)
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
continue
}
var candidateRepo reference.Named
if !candidate.UnknownLocation {
candidateRepo, err = parseBICLocationReference(candidate.Location)
if err != nil {
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
continue
}
}
if !impl.BlobMatchesRequiredCompression(options, compressionAlgorithm) {
requiredCompression := "nil"
if compressionAlgorithm != nil {
requiredCompression = compressionAlgorithm.Name()
}
if !candidate.UnknownLocation {
logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name())
} else {
logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) with no location match, checking current repo", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression)
}
continue
}
if !candidate.UnknownLocation {
if candidate.CompressorName != blobinfocache.Uncompressed {
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
} else {
logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo %s", candidate.Digest.String(), candidateRepo.Name())
}
// Sanity checks:
if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
// OCI distribution spec 1.1 allows mounting blobs without specifying the source repo
// (the "from" parameter); in that case we might try to use these candidates as well.
//
// OTOH that would mean we can't do the “blobExists” check, and if there is no match
// we could get an upload request that we would have to cancel.
logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
continue
}
if candidate.CompressorName != blobinfocache.Uncompressed {
logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
} else {
if candidate.CompressorName != blobinfocache.Uncompressed {
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressorName)
} else {
logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo with no location match, checking current repo", candidate.Digest.String())
}
// This digest is a known variant of this blob but we don't
// have a recorded location in this registry, let's try looking
// for it in the current repo.
candidateRepo = reference.TrimNamed(d.ref.ref)
logrus.Debugf("Trying to reuse cached location %s with no compression in %s", candidate.Digest.String(), candidateRepo.Name())
}
// Sanity checks:
if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
continue
}
if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
logrus.Debug("... Already tried the primary destination")
@@ -433,6 +388,12 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
if err != nil {
logrus.Debugf("... Failed: %v", err)
continue
}
return true, private.ReusedBlob{
Digest: candidate.Digest,
Size: size,
@@ -707,10 +668,6 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.
}
}
// To make sure we can safely append to the slices of ociManifest, without adding a remote dependency on the code that creates it.
ociManifest.Layers = slices.Clone(ociManifest.Layers)
// We don't need to ^^^ for ociConfig.RootFS.DiffIDs because we have created it empty ourselves, and json.Unmarshal is documented to append() to
// the slice in the original object (or in a newly allocated object).
for _, sig := range signatures {
mimeType := sig.UntrustedMIMEType()
payloadBlob := sig.UntrustedPayload()

View File

@@ -47,12 +47,7 @@ func httpResponseToError(res *http.Response, context string) error {
}
// registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution
// registry.
//
// WARNING: The OCI distribution spec says
// “A `4XX` response code from the registry MAY return a body in any format.”; but if it is
// JSON, it MUST use the errcode.Error structure.
// So, callers should primarily decide based on HTTP StatusCode, not based on error type here.
// registry
func registryHTTPResponseToError(res *http.Response) error {
err := handleErrorResponse(res)
// len(errs) == 0 should never be returned by handleErrorResponse; if it does, we don't modify it and let the caller report it as is.
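The removed warning above is about the errcode.Error JSON body defined by the distribution spec. A minimal, stdlib-only sketch of such a body and how it decodes; the payload shown is invented.

package main

import (
	"encoding/json"
	"fmt"
)

// registryError mirrors the wire shape of one entry in the spec's
// {"errors": [...]} envelope; it is a stand-in, not the errcode package.
type registryError struct {
	Code    string          `json:"code"`
	Message string          `json:"message"`
	Detail  json.RawMessage `json:"detail,omitempty"`
}

type registryErrors struct {
	Errors []registryError `json:"errors"`
}

func main() {
	body := []byte(`{"errors":[{"code":"MANIFEST_UNKNOWN","message":"manifest unknown","detail":{"Tag":"latest"}}]}`)
	var parsed registryErrors
	if err := json.Unmarshal(body, &parsed); err != nil {
		panic(err) // a 4XX body is not guaranteed to be JSON at all
	}
	for _, e := range parsed.Errors {
		fmt.Printf("%s: %s\n", e.Code, e.Message)
	}
}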
@@ -88,7 +83,7 @@ func registryHTTPResponseToError(res *http.Response) error {
response = response[:50] + "..."
}
// %.0w makes e visible to error.Unwrap() without including any text
err = fmt.Errorf("StatusCode: %d, %q%.0w", e.StatusCode, response, e)
err = fmt.Errorf("StatusCode: %d, %s%.0w", e.StatusCode, response, e)
case errcode.Error:
// e.Error() is fmt.Sprintf("%s: %s", e.Code.Error(), e.Message), which is usually
// rather redundant. So reword it without using e.Code.Error() if e.Message is the default.
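A small self-contained illustration of the %.0w trick mentioned above; the status code and wrapped error are made up.

package main

import (
	"errors"
	"fmt"
)

var errUpstream = errors.New("manifest unknown")

func main() {
	// Precision 0 keeps errUpstream's text out of the message, but %w still
	// records it as the wrapped error, so errors.Is/Unwrap keep working.
	err := fmt.Errorf("StatusCode: %d, %q%.0w", 404, "some response body", errUpstream)
	fmt.Println(err)                               // StatusCode: 404, "some response body"
	fmt.Println(errors.Is(err, errUpstream))       // true
	fmt.Println(errors.Unwrap(err) == errUpstream) // true
}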

View File

@@ -129,9 +129,6 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader,
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if !impl.OriginalBlobMatchesRequiredCompression(options) {
return false, private.ReusedBlob{}, nil
}
if err := d.archive.lock(); err != nil {
return false, private.ReusedBlob{}, err
}

View File

@@ -57,7 +57,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
// The caller should call .Close() on the returned archive when done.
func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
// Save inputStream to a temporary file
tarCopyFile, err := tmpdir.CreateBigFileTemp(sys, "docker-tar")
tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
if err != nil {
return nil, fmt.Errorf("creating temporary file: %w", err)
}
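NewReaderFromStream above starts by spooling the stream to a temporary file so it can be read as a seekable tar archive. A minimal stdlib-only sketch of that spooling step; the helper name and the empty-dir default are illustrative, not the tmpdir API.

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// spoolToTempFile copies in to a new temporary file and returns its path.
func spoolToTempFile(dir string, in io.Reader) (string, error) {
	f, err := os.CreateTemp(dir, "docker-tar") // dir == "" falls back to os.TempDir()
	if err != nil {
		return "", fmt.Errorf("creating temporary file: %w", err)
	}
	defer f.Close()
	if _, err := io.Copy(f, in); err != nil {
		os.Remove(f.Name())
		return "", fmt.Errorf("copying stream to %s: %w", f.Name(), err)
	}
	return f.Name(), nil
}

func main() {
	path, err := spoolToTempFile("", strings.NewReader("fake tar bytes"))
	if err != nil {
		panic(err)
	}
	defer os.Remove(path)
	fmt.Println("spooled to", path)
}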

View File

@@ -40,7 +40,7 @@ func DockerReferenceNamespaces(ref reference.Named) []string {
// then in its parent "docker.io/library"; in none of "busybox",
// un-namespaced "library" nor in "" supposedly implicitly representing "library/".
//
// ref.Name() == ref.Domain() + "/" + ref.Path(), so the last
// ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last
// iteration matches the host name (for any namespace).
res := []string{}
name := ref.Name()
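A toy, string-only rendering of the namespace walk this comment describes; the real code goes through the reference package, and the point here is just the iteration order.

package main

import (
	"fmt"
	"strings"
)

// namespaces strips one path component at a time until only the registry
// host is left, mirroring the search order described in the comment above.
func namespaces(name string) []string {
	res := []string{}
	for {
		res = append(res, name)
		i := strings.LastIndex(name, "/")
		if i < 0 {
			break
		}
		name = name[:i]
	}
	return res
}

func main() {
	fmt.Println(namespaces("docker.io/library/busybox"))
	// [docker.io/library/busybox docker.io/library docker.io]
}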

View File

@@ -2,8 +2,6 @@ package image
import (
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/unparsedimage"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
)
@@ -19,23 +17,3 @@ type UnparsedImage = image.UnparsedImage
func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
return image.UnparsedInstance(src, instanceDigest)
}
// unparsedWithRef wraps a private.UnparsedImage, claiming another replacementRef
type unparsedWithRef struct {
private.UnparsedImage
ref types.ImageReference
}
func (uwr *unparsedWithRef) Reference() types.ImageReference {
return uwr.ref
}
// UnparsedInstanceWithReference returns a types.UnparsedImage for wrappedInstance which claims to be a replacementRef.
// This is useful for combining image data with other reference values, e.g. to check signatures on a locally-pulled image
// based on a remote-registry policy.
func UnparsedInstanceWithReference(wrappedInstance types.UnparsedImage, replacementRef types.ImageReference) types.UnparsedImage {
return &unparsedWithRef{
UnparsedImage: unparsedimage.FromPublic(wrappedInstance),
ref: replacementRef,
}
}
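The removed wrapper above relies on Go interface embedding: embed the wrapped image and override only Reference. A standalone sketch of that pattern with stand-in types; none of these names are the c/image API.

package main

import "fmt"

type imageReference interface{ String() string }

type namedRef string

func (r namedRef) String() string { return string(r) }

type unparsedImage interface {
	Reference() imageReference
	Manifest() []byte
}

type localImage struct{ ref imageReference }

func (i *localImage) Reference() imageReference { return i.ref }
func (i *localImage) Manifest() []byte          { return []byte("{}") }

// unparsedWithRef delegates every method to the embedded image except
// Reference, which reports the replacement reference instead.
type unparsedWithRef struct {
	unparsedImage
	ref imageReference
}

func (u *unparsedWithRef) Reference() imageReference { return u.ref }

func main() {
	local := &localImage{ref: namedRef("containers-storage:busybox")}
	wrapped := &unparsedWithRef{unparsedImage: local, ref: namedRef("docker.io/library/busybox:latest")}
	fmt.Println(wrapped.Reference(), string(wrapped.Manifest()))
}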

View File

@@ -23,12 +23,6 @@ type v1OnlyBlobInfoCache struct {
types.BlobInfoCache
}
func (bic *v1OnlyBlobInfoCache) Open() {
}
func (bic *v1OnlyBlobInfoCache) Close() {
}
func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
}

View File

@@ -18,13 +18,6 @@ const (
// of compression was applied to the blobs it keeps information about.
type BlobInfoCache2 interface {
types.BlobInfoCache
// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
Open()
// Close destroys state created by Open().
Close()
// RecordDigestCompressorName records a compressor for the blob with the specified digest,
// or Uncompressed or UnknownCompression.
// WARNING: Only call this with LOCALLY VERIFIED data; don't record a compressor for a
@@ -32,7 +25,7 @@ type BlobInfoCache2 interface {
// otherwise the cache could be poisoned and cause us to make incorrect edits to type
// information in a manifest.
RecordDigestCompressorName(anyDigest digest.Digest, compressorName string)
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
//
@@ -46,8 +39,7 @@ type BlobInfoCache2 interface {
// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
type BICReplacementCandidate2 struct {
Digest digest.Digest
CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
UnknownLocation bool // is true when `Location` for this blob is not set
Location types.BICLocationReference // not set if UnknownLocation is set to `true`
Digest digest.Digest
CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
Location types.BICLocationReference
}

View File

@@ -12,10 +12,8 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/types"
ociencspec "github.com/containers/ocicrypt/spec"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)
type manifestOCI1 struct {
@@ -88,7 +86,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
// old image manifests work (docker v2s1 especially).
func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest)
return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
}
cb, err := m.ConfigBlob(ctx)
@@ -196,86 +194,26 @@ func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, opti
return m.convertToManifestSchema2(ctx, options)
}
// layerEditsOfOCIOnlyFeatures checks if options requires some layer edits to be done before converting to a Docker format.
// If not, it returns (nil, nil).
// If decryption is required, it returns a set of edits to provide to OCI1.UpdateLayerInfos,
// and edits *options to not try decryption again.
func (m *manifestOCI1) layerEditsOfOCIOnlyFeatures(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) {
if options == nil || options.LayerInfos == nil {
return nil, nil
}
originalInfos := m.LayerInfos()
if len(originalInfos) != len(options.LayerInfos) {
return nil, fmt.Errorf("preparing to decrypt before conversion: %d layers vs. %d layer edits", len(originalInfos), len(options.LayerInfos))
}
ociOnlyEdits := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionally deviate.
laterEdits := slices.Clone(options.LayerInfos)
needsOCIOnlyEdits := false
for i, edit := range options.LayerInfos {
// Unless determined otherwise, don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit.
ociOnlyEdits[i].CompressionOperation = types.PreserveOriginal
ociOnlyEdits[i].CompressionAlgorithm = nil
if edit.CryptoOperation == types.Decrypt {
needsOCIOnlyEdits = true // Encrypted types must be removed before conversion because they can't be represented in Docker schemas
ociOnlyEdits[i].CryptoOperation = types.Decrypt
laterEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail.
}
if originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerZstd ||
originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerNonDistributableZstd { //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
needsOCIOnlyEdits = true // Zstd MIME types must be removed before conversion because they can't be represented in Docker schemas.
ociOnlyEdits[i].CompressionOperation = edit.CompressionOperation
ociOnlyEdits[i].CompressionAlgorithm = edit.CompressionAlgorithm
laterEdits[i].CompressionOperation = types.PreserveOriginal
laterEdits[i].CompressionAlgorithm = nil
}
}
if !needsOCIOnlyEdits {
return nil, nil
}
options.LayerInfos = laterEdits
return ociOnlyEdits, nil
}
// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestOCI1 object.
func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) {
func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.ManifestUpdateOptions) (*manifestSchema2, error) {
if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest)
}
// Mostly we first make a format conversion, and _afterwards_ do layer edits. But first we need to do the layer edits
// which remove OCI-specific features, because trying to convert those layers would fail.
// So, do the layer updates for decryption, and for conversions from Zstd.
ociManifest := m.m
ociOnlyEdits, err := m.layerEditsOfOCIOnlyFeatures(options)
if err != nil {
return nil, err
}
if ociOnlyEdits != nil {
ociManifest = manifest.OCI1Clone(ociManifest)
if err := ociManifest.UpdateLayerInfos(ociOnlyEdits); err != nil {
return nil, err
}
return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
}
// Create a copy of the descriptor.
config := schema2DescriptorFromOCI1Descriptor(ociManifest.Config)
config := schema2DescriptorFromOCI1Descriptor(m.m.Config)
// Above, we have already checked that this manifest refers to an image, not an OCI artifact,
// so the only difference between OCI and DockerSchema2 is the mediatypes. The
// media type of the manifest is handled by manifestSchema2FromComponents.
config.MediaType = manifest.DockerV2Schema2ConfigMediaType
layers := make([]manifest.Schema2Descriptor, len(ociManifest.Layers))
layers := make([]manifest.Schema2Descriptor, len(m.m.Layers))
for idx := range layers {
layers[idx] = schema2DescriptorFromOCI1Descriptor(ociManifest.Layers[idx])
layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx])
switch layers[idx].MediaType {
case imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType
@@ -289,9 +227,6 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *type
layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
case imgspecv1.MediaTypeImageLayerZstd:
return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
case ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc, ociencspec.MediaTypeLayerZstdEnc,
ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZstdEnc:
return nil, fmt.Errorf("during manifest conversion: encrypted layers (%q) are not supported in docker images", layers[idx].MediaType)
default:
return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType)
}
@@ -309,7 +244,7 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *type
// This does not change the state of the original manifestOCI1 object.
func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest)
return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
}
// We can't directly convert images to V1, but we can transitively convert via a V2 image

View File

@@ -1,25 +0,0 @@
package impl
import (
"github.com/containers/image/v5/internal/private"
compression "github.com/containers/image/v5/pkg/compression/types"
)
// BlobMatchesRequiredCompression validates if compression is required by the caller while selecting a blob, if it is required
// then function performs a match against the compression requested by the caller and compression of existing blob
// (which can be nil to represent uncompressed or unknown)
func BlobMatchesRequiredCompression(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool {
if options.RequiredCompression == nil {
return true // no requirement imposed
}
if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName {
// HACK: Never match when the caller asks for zstd:chunked, because we don't record the annotations required to use the chunked blobs.
// The caller must re-compress to build those annotations.
return false
}
return candidateCompression != nil && (options.RequiredCompression.Name() == candidateCompression.Name())
}
func OriginalBlobMatchesRequiredCompression(opts private.TryReusingBlobOptions) bool {
return BlobMatchesRequiredCompression(opts, opts.OriginalCompression)
}
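A standalone illustration of the matching rule the removed helper implemented, using stand-in types rather than the private and compression packages.

package main

import "fmt"

type algorithm struct{ name string }

func (a *algorithm) Name() string { return a.name }

const zstdChunkedName = "zstd:chunked"

// blobMatchesRequiredCompression: nil requirement always matches, zstd:chunked
// never matches a cached candidate, anything else matches only on an exact name.
func blobMatchesRequiredCompression(required, candidate *algorithm) bool {
	if required == nil {
		return true // no requirement imposed
	}
	if required.Name() == zstdChunkedName {
		return false // chunked reuse would need annotations that were never recorded
	}
	return candidate != nil && required.Name() == candidate.Name()
}

func main() {
	gzip, zstd := &algorithm{"gzip"}, &algorithm{"zstd"}
	fmt.Println(blobMatchesRequiredCompression(nil, zstd))                         // true
	fmt.Println(blobMatchesRequiredCompression(&algorithm{zstdChunkedName}, gzip)) // false
	fmt.Println(blobMatchesRequiredCompression(gzip, gzip))                        // true
	fmt.Println(blobMatchesRequiredCompression(gzip, nil))                         // false
}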

View File

@@ -64,9 +64,6 @@ func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inpu
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if options.RequiredCompression != nil {
return false, private.ReusedBlob{}, nil
}
reused, blob, err := w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
if !reused || err != nil {
return reused, private.ReusedBlob{}, err

View File

@@ -5,7 +5,6 @@ import (
"fmt"
platform "github.com/containers/image/v5/internal/pkg/platform"
compression "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -58,15 +57,11 @@ func (list *Schema2ListPublic) Instances() []digest.Digest {
func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
for _, manifest := range list.Manifests {
if manifest.Digest == instanceDigest {
ret := ListUpdate{
return ListUpdate{
Digest: manifest.Digest,
Size: manifest.Size,
MediaType: manifest.MediaType,
}
ret.ReadOnly.CompressionAlgorithmNames = []string{compression.GzipAlgorithmName}
platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform)
ret.ReadOnly.Platform = &platform
return ret, nil
}, nil
}
}
return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)
@@ -114,28 +109,23 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
}
index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
case ListOpAdd:
if editInstance.AddPlatform == nil {
// Should we create a struct with empty fields instead?
// Right now ListOpAdd is only called when an instance with the same platform value
// already exists in the manifest, so this should not be reached in practice.
return fmt.Errorf("adding a schema2 list instance with no platform specified is not supported")
}
addedEntries = append(addedEntries, Schema2ManifestDescriptor{
Schema2Descriptor{
Digest: editInstance.AddDigest,
Size: editInstance.AddSize,
MediaType: editInstance.AddMediaType,
addInstance := Schema2ManifestDescriptor{
Schema2Descriptor{Digest: editInstance.AddDigest, Size: editInstance.AddSize, MediaType: editInstance.AddMediaType},
Schema2PlatformSpec{
OS: editInstance.AddPlatform.OS,
Architecture: editInstance.AddPlatform.Architecture,
OSVersion: editInstance.AddPlatform.OSVersion,
OSFeatures: editInstance.AddPlatform.OSFeatures,
Variant: editInstance.AddPlatform.Variant,
},
schema2PlatformSpecFromOCIPlatform(*editInstance.AddPlatform),
})
}
addedEntries = append(addedEntries, addInstance)
default:
return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation)
}
}
if len(addedEntries) != 0 {
// slices.Clone() here to ensure a private backing array;
// an external caller could have manually created Schema2ListPublic with a slice with extra capacity.
index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
index.Manifests = append(index.Manifests, addedEntries...)
}
return nil
}
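The slices.Clone() comment above (like the one in the docker destination earlier in this diff) guards against append aliasing. A minimal demonstration with the Go 1.21 standard library slices package; the vendored code imports golang.org/x/exp/slices, which behaves the same here.

package main

import (
	"fmt"
	"slices"
)

func main() {
	base := make([]string, 2, 4) // spare capacity, as an external caller might hand us
	base[0], base[1] = "manifest-a", "manifest-b"

	x := append(base, "added-1") // reuses base's backing array (len 2 -> 3, cap 4)
	y := append(base, "added-2") // reuses it again and overwrites "added-1"
	fmt.Println(x[2], y[2])      // added-2 added-2

	z := append(slices.Clone(base), "added-3") // private backing array, no interference
	fmt.Println(x[2], z[2])                    // added-2 added-3
}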
@@ -158,7 +148,13 @@ func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range list.Manifests {
imagePlatform := ociPlatformFromSchema2PlatformSpec(d.Platform)
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS: d.Platform.OS,
OSVersion: d.Platform.OSVersion,
OSFeatures: slices.Clone(d.Platform.OSFeatures),
Variant: d.Platform.Variant,
}
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
return d.Digest, nil
}
@@ -218,14 +214,20 @@ func Schema2ListPublicClone(list *Schema2ListPublic) *Schema2ListPublic {
func (list *Schema2ListPublic) ToOCI1Index() (*OCI1IndexPublic, error) {
components := make([]imgspecv1.Descriptor, 0, len(list.Manifests))
for _, manifest := range list.Manifests {
platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform)
components = append(components, imgspecv1.Descriptor{
converted := imgspecv1.Descriptor{
MediaType: manifest.MediaType,
Size: manifest.Size,
Digest: manifest.Digest,
URLs: slices.Clone(manifest.URLs),
Platform: &platform,
})
Platform: &imgspecv1.Platform{
OS: manifest.Platform.OS,
Architecture: manifest.Platform.Architecture,
OSFeatures: slices.Clone(manifest.Platform.OSFeatures),
OSVersion: manifest.Platform.OSVersion,
Variant: manifest.Platform.Variant,
},
}
components = append(components, converted)
}
oci := OCI1IndexPublicFromComponents(components, nil)
return oci, nil
@@ -300,15 +302,3 @@ func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
}
return schema2ListFromPublic(public), nil
}
// ociPlatformFromSchema2PlatformSpec converts a schema2 platform p to the OCI structure.
func ociPlatformFromSchema2PlatformSpec(p Schema2PlatformSpec) imgspecv1.Platform {
return imgspecv1.Platform{
Architecture: p.Architecture,
OS: p.OS,
OSVersion: p.OSVersion,
OSFeatures: slices.Clone(p.OSFeatures),
Variant: p.Variant,
// Features is not supported in OCI, and discarded.
}
}

View File

@@ -1,10 +1,6 @@
package manifest
import (
"fmt"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
import "fmt"
// FIXME: This is a duplicate of c/image/manifestDockerV2Schema2ConfigMediaType.
// Deduplicate that, depending on outcome of https://github.com/containers/image/pull/1791 .
@@ -30,20 +26,8 @@ type NonImageArtifactError struct {
mimeType string
}
// NewNonImageArtifactError returns a NonImageArtifactError about an artifact manifest.
//
// This is typically called if manifest.Config.MediaType != imgspecv1.MediaTypeImageConfig .
func NewNonImageArtifactError(manifest *imgspecv1.Manifest) error {
// Callers decide based on manifest.Config.MediaType that this is not an image;
// in that case manifest.ArtifactType can be optionally defined, and if it is, it is typically
// more relevant because config may be ~absent with imgspecv1.MediaTypeEmptyJSON.
//
// If ArtifactType and Config.MediaType are both defined and non-trivial, presumably
// ArtifactType is the “top-level” one, although that's not defined by the spec.
mimeType := manifest.ArtifactType
if mimeType == "" {
mimeType = manifest.Config.MediaType
}
// NewNonImageArtifactError returns a NonImageArtifactError about an artifact with mimeType.
func NewNonImageArtifactError(mimeType string) error {
return NonImageArtifactError{mimeType: mimeType}
}

View File

@@ -68,12 +68,6 @@ type ListUpdate struct {
Digest digest.Digest
Size int64
MediaType string
// ReadOnly fields: may be set by Instance(), ignored by UpdateInstance()
ReadOnly struct {
Platform *imgspecv1.Platform
Annotations map[string]string
CompressionAlgorithmNames []string
}
}
type ListOp int

View File

@@ -53,15 +53,11 @@ func (index *OCI1IndexPublic) Instances() []digest.Digest {
func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
for _, manifest := range index.Manifests {
if manifest.Digest == instanceDigest {
ret := ListUpdate{
return ListUpdate{
Digest: manifest.Digest,
Size: manifest.Size,
MediaType: manifest.MediaType,
}
ret.ReadOnly.Platform = manifest.Platform
ret.ReadOnly.Annotations = manifest.Annotations
ret.ReadOnly.CompressionAlgorithmNames = annotationsToCompressionAlgorithmNames(manifest.Annotations)
return ret, nil
}, nil
}
}
return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
@@ -82,29 +78,14 @@ func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error {
return index.editInstances(editInstances)
}
func annotationsToCompressionAlgorithmNames(annotations map[string]string) []string {
result := make([]string, 0, 1)
if annotations[OCI1InstanceAnnotationCompressionZSTD] == OCI1InstanceAnnotationCompressionZSTDValue {
result = append(result, compression.ZstdAlgorithmName)
}
// No compression was detected, hence assume instance has default compression `Gzip`
if len(result) == 0 {
result = append(result, compression.GzipAlgorithmName)
}
return result
}
func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, annotationsMap *map[string]string) {
func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, annotationsMap map[string]string) {
// TODO: This should also delete the algorithm if map already contains an algorithm and compressionAlgorithm
// list has a different algorithm. To do that, we would need to modify the callers to always provide a reliable
// and full compressionAlgorithms list.
if *annotationsMap == nil && len(compressionAlgorithms) > 0 {
*annotationsMap = map[string]string{}
}
for _, algo := range compressionAlgorithms {
switch algo.Name() {
case compression.ZstdAlgorithmName:
(*annotationsMap)[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue
annotationsMap[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue
default:
continue
}
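In the newer version addCompressionAnnotations takes *map[string]string so it can allocate the map when the caller still holds nil, which a plain map parameter cannot do. A tiny sketch of that pointer-to-map idiom; the annotation key is made up.

package main

import "fmt"

// addAnnotation allocates the map through the pointer if needed, so the
// new map is visible to the caller after the call returns.
func addAnnotation(annotations *map[string]string, key, value string) {
	if *annotations == nil {
		*annotations = map[string]string{}
	}
	(*annotations)[key] = value
}

func main() {
	var a map[string]string // nil, as descriptor annotations often start out
	addAnnotation(&a, "org.example.compression", "zstd")
	fmt.Println(a) // map[org.example.compression:zstd]
}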
@@ -149,13 +130,13 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error {
maps.Copy(index.Manifests[targetIndex].Annotations, editInstance.UpdateAnnotations)
}
}
addCompressionAnnotations(editInstance.UpdateCompressionAlgorithms, &index.Manifests[targetIndex].Annotations)
addCompressionAnnotations(editInstance.UpdateCompressionAlgorithms, index.Manifests[targetIndex].Annotations)
case ListOpAdd:
annotations := map[string]string{}
if editInstance.AddAnnotations != nil {
annotations = maps.Clone(editInstance.AddAnnotations)
}
addCompressionAnnotations(editInstance.AddCompressionAlgorithms, &annotations)
addCompressionAnnotations(editInstance.AddCompressionAlgorithms, annotations)
addedEntries = append(addedEntries, imgspecv1.Descriptor{
MediaType: editInstance.AddMediaType,
Size: editInstance.AddSize,
@@ -167,24 +148,11 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error {
}
}
if len(addedEntries) != 0 {
// slices.Clone() here to ensure the slice uses a private backing array;
// an external caller could have manually created OCI1IndexPublic with a slice with extra capacity.
index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
index.Manifests = append(index.Manifests, addedEntries...)
}
if len(addedEntries) != 0 || updatedAnnotations {
slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) int {
// FIXME? With Go 1.21 and cmp.Compare available, turn instanceIsZstd into an integer score that can be compared, and generalizes
// into more algorithms?
aZstd := instanceIsZstd(a)
bZstd := instanceIsZstd(b)
switch {
case aZstd == bZstd:
return 0
case !aZstd: // Implies bZstd
return -1
default: // aZstd && !bZstd
return 1
}
slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) bool {
return !instanceIsZstd(a) && instanceIsZstd(b)
})
}
return nil
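The removed comparator above is the three-way form expected by newer slices.SortStableFunc, replacing the older bool-returning less function kept by this revert. A compact illustration of the stable gzip-before-zstd ordering with the Go 1.21 standard library slices package; the digests are fake.

package main

import (
	"fmt"
	"slices"
)

type instance struct {
	digest string
	isZstd bool
}

func main() {
	manifests := []instance{
		{"sha256:aaa", true}, {"sha256:bbb", false}, {"sha256:ccc", true}, {"sha256:ddd", false},
	}
	slices.SortStableFunc(manifests, func(a, b instance) int {
		switch {
		case a.isZstd == b.isZstd:
			return 0 // stable sort keeps the original relative order
		case !a.isZstd:
			return -1 // non-zstd (gzip) instances sort first
		default:
			return 1
		}
	})
	fmt.Println(manifests)
	// [{sha256:bbb false} {sha256:ddd false} {sha256:aaa true} {sha256:ccc true}]
}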
@@ -222,7 +190,7 @@ func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip
case ic.manifestPosition != other.manifestPosition:
return ic.manifestPosition < other.manifestPosition
}
panic("internal error: invalid comparison between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition.
panic("internal error: invalid comparision between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition.
}
// chooseInstance is a private equivalent to ChooseInstanceByCompression,
@@ -241,7 +209,13 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi
for manifestIndex, d := range index.Manifests {
candidate := instanceCandidate{platformIndex: math.MaxInt, manifestPosition: manifestIndex, isZstd: instanceIsZstd(d), digest: d.Digest}
if d.Platform != nil {
imagePlatform := ociPlatformClone(*d.Platform)
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS: d.Platform.OS,
OSVersion: d.Platform.OSVersion,
OSFeatures: slices.Clone(d.Platform.OSFeatures),
Variant: d.Platform.Variant,
}
platformIndex := slices.IndexFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool {
return platform.MatchesPlatform(imagePlatform, wantedPlatform)
})
@@ -295,8 +269,13 @@ func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotation
for i, component := range components {
var platform *imgspecv1.Platform
if component.Platform != nil {
platformCopy := ociPlatformClone(*component.Platform)
platform = &platformCopy
platform = &imgspecv1.Platform{
Architecture: component.Platform.Architecture,
OS: component.Platform.OS,
OSVersion: component.Platform.OSVersion,
OSFeatures: slices.Clone(component.Platform.OSFeatures),
Variant: component.Platform.Variant,
}
}
m := imgspecv1.Descriptor{
MediaType: component.MediaType,
@@ -333,15 +312,22 @@ func (index *OCI1IndexPublic) ToSchema2List() (*Schema2ListPublic, error) {
Architecture: runtime.GOARCH,
}
}
components = append(components, Schema2ManifestDescriptor{
converted := Schema2ManifestDescriptor{
Schema2Descriptor{
MediaType: manifest.MediaType,
Size: manifest.Size,
Digest: manifest.Digest,
URLs: slices.Clone(manifest.URLs),
},
schema2PlatformSpecFromOCIPlatform(*platform),
})
Schema2PlatformSpec{
OS: platform.OS,
Architecture: platform.Architecture,
OSFeatures: slices.Clone(platform.OSFeatures),
OSVersion: platform.OSVersion,
Variant: platform.Variant,
},
}
components = append(components, converted)
}
s2 := Schema2ListPublicFromComponents(components)
return s2, nil
@@ -415,32 +401,3 @@ func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
}
return oci1IndexFromPublic(public), nil
}
// ociPlatformClone returns an independent copy of p.
func ociPlatformClone(p imgspecv1.Platform) imgspecv1.Platform {
// The only practical way in Go to give read-only access to an array is to copy it.
// The only practical way in Go to copy a deep structure is to either do it manually field by field,
// or to use reflection (incl. a round-trip through JSON, which uses reflection).
//
// The combination of the two is just sad, and leads to code like this, which will
// need to be updated with every new Platform field.
return imgspecv1.Platform{
Architecture: p.Architecture,
OS: p.OS,
OSVersion: p.OSVersion,
OSFeatures: slices.Clone(p.OSFeatures),
Variant: p.Variant,
}
}
// schema2PlatformSpecFromOCIPlatform converts an OCI platform p to the schema2 structure.
func schema2PlatformSpecFromOCIPlatform(p imgspecv1.Platform) Schema2PlatformSpec {
return Schema2PlatformSpec{
Architecture: p.Architecture,
OS: p.OS,
OSVersion: p.OSVersion,
OSFeatures: slices.Clone(p.OSFeatures),
Variant: p.Variant,
Features: nil,
}
}
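Both removed helpers above copy OSFeatures explicitly; a quick demonstration of why plain struct assignment is not enough, using a toy struct rather than imgspecv1.Platform.

package main

import "fmt"

type platform struct {
	OS         string
	OSFeatures []string
}

func main() {
	orig := platform{OS: "linux", OSFeatures: []string{"feature-a"}}

	shallow := orig // copies the slice header only; the backing array is shared
	shallow.OSFeatures[0] = "edited"
	fmt.Println(orig.OSFeatures[0]) // edited: the original was mutated too

	deep := orig
	deep.OSFeatures = append([]string(nil), orig.OSFeatures...) // private copy
	deep.OSFeatures[0] = "isolated"
	fmt.Println(orig.OSFeatures[0]) // still edited, unaffected this time
}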

View File

@@ -128,10 +128,6 @@ var compatibility = map[string][]string{
// the most compatible platform is first.
// If some option (arch, os, variant) is not present, a value from current platform is detected.
func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
// Note that this does not use Platform.OSFeatures and Platform.OSVersion at all.
// The fields are not specified by the OCI specification, as of version 1.1, usefully enough
// to be interoperable, anyway.
wantedArch := runtime.GOARCH
wantedVariant := ""
if ctx != nil && ctx.ArchitectureChoice != "" {

View File

@@ -112,11 +112,10 @@ type TryReusingBlobOptions struct {
// Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers
// if they use internal/imagedestination/impl.Compat;
// in that case, they will all be consistently zero-valued.
RequiredCompression *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go
OriginalCompression *compression.Algorithm // Must be set if RequiredCompression is set; can be set to nil to indicate “uncompressed” or “unknown”.
EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
SrcRef reference.Named // A reference to the source image that contains the input blob.
EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
SrcRef reference.Named // A reference to the source image that contains the input blob.
}
// ReusedBlob is information about a blob reused in a destination.

Some files were not shown because too many files have changed in this diff.