From 4d80bf8c7d1594f9bad7db4b84bd46f1cf7901ab Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 1 Mar 2024 18:22:38 +0000
Subject: [PATCH] fix(deps): update github.com/containers/image/v5 digest to
 faa4f4f

Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
---
 go.mod | 46 +-
 go.sum | 109 +--
 vendor/github.com/Microsoft/hcsshim/Makefile | 87 ++-
 .../Microsoft/hcsshim/internal/oc/errors.go | 4 +-
 .../github.com/containerd/containerd/NOTICE | 16 -
 .../{containerd => errdefs}/LICENSE | 0
 .../github.com/containerd/errdefs/README.md | 13 +
 .../{containerd => }/errdefs/errors.go | 0
 .../{containerd => }/errdefs/grpc.go | 0
 .../containers/image/v5/copy/compression.go | 11 +-
 .../image/v5/copy/digesting_reader.go | 4 +-
 .../containers/image/v5/copy/multiple.go | 3 +
 .../containers/image/v5/copy/progress_bars.go | 4 +-
 .../containers/image/v5/copy/single.go | 52 +-
 .../image/v5/docker/docker_client.go | 35 +-
 .../internal/imagedestination/impl/helpers.go | 3 +-
 .../stubs/put_blob_partial.go | 3 +-
 .../image/v5/internal/manifest/list.go | 2 +
 .../image/v5/internal/manifest/manifest.go | 7 +-
 .../image/v5/internal/manifest/oci_index.go | 28 +-
 .../image/v5/internal/private/private.go | 12 +-
 .../containers/image/v5/manifest/common.go | 2 +-
 .../containers/image/v5/manifest/oci.go | 36 -
 .../image/v5/oci/archive/oci_dest.go | 5 +-
 .../image/v5/openshift/openshift_dest.go | 5 +-
 .../image/v5/pkg/compression/compression.go | 12 +-
 .../v5/pkg/compression/internal/types.go | 36 +-
 .../v5/signature/sigstore/fulcio/fulcio.go | 8 +-
 .../image/v5/storage/storage_dest.go | 634 ++++++++++++------
 .../image/v5/storage/storage_image.go | 16 +-
 .../image/v5/storage/storage_src.go | 81 +--
 .../github.com/containers/storage/.cirrus.yml | 2 +-
 vendor/github.com/containers/storage/Makefile | 2 +-
 vendor/github.com/containers/storage/VERSION | 2 +-
 .../containers/storage/drivers/driver.go | 22 +-
 .../storage/drivers/overlay/composefs.go | 72 +-
 .../storage/drivers/overlay/overlay.go | 253 ++++---
 .../storage/drivers/overlay/overlay_nocgo.go | 13 +
 .../containers/storage/drivers/vfs/driver.go | 56 +-
 .../github.com/containers/storage/layers.go | 126 +++-
 .../containers/storage/pkg/archive/archive.go | 35 +-
 .../storage/pkg/archive/archive_unix.go | 25 +
 .../storage/pkg/chunked/cache_linux.go | 26 +-
 .../storage/pkg/chunked/compression_linux.go | 4 +-
 .../storage/pkg/chunked/dump/dump.go | 24 +-
 .../storage/pkg/chunked/storage_linux.go | 194 ++++--
 .../containers/storage/pkg/chunked/toc/toc.go | 19 +-
 .../storage/pkg/fsverity/fsverity_linux.go | 45 ++
 .../pkg/fsverity/fsverity_unsupported.go | 21 +
 .../storage/pkg/homedir/homedir_others.go | 4 +-
 .../storage/pkg/homedir/homedir_windows.go | 29 +
 .../storage/pkg/idtools/idtools_unix.go | 2 +-
 .../containers/storage/storage.conf | 9 +-
 vendor/github.com/containers/storage/store.go | 172 ++++-
 .../github.com/containers/storage/userns.go | 4 +-
 .../go-jose/go-jose/v3/BUG-BOUNTY.md | 10 -
 .../go-jose/go-jose/v3/CHANGELOG.md | 19 +-
 .../github.com/go-jose/go-jose/v3/README.md | 53 +-
 .../github.com/go-jose/go-jose/v3/SECURITY.md | 13 +
 .../go-jose/go-jose/v3/asymmetric.go | 3 +
 .../github.com/go-jose/go-jose/v3/crypter.go | 93 ++-
 vendor/github.com/go-jose/go-jose/v3/doc.go | 2 -
 .../github.com/go-jose/go-jose/v3/encoding.go | 33 +
 .../go-jose/go-jose/v3/json/decode.go | 3 +-
 .../go-jose/go-jose/v3/json/encode.go | 28 +-
 .../go-jose/go-jose/v3/json/stream.go | 1 -
 vendor/github.com/go-jose/go-jose/v3/jwe.go | 14 +-
 vendor/github.com/go-jose/go-jose/v3/jwk.go | 18 +-
 vendor/github.com/go-jose/go-jose/v3/jws.go | 13 +-
 .../github.com/go-jose/go-jose/v3/opaque.go | 2 +-
 .../github.com/go-jose/go-jose/v3/shared.go | 9 +-
 .../github.com/go-jose/go-jose/v3/signing.go | 59 +-
 .../go-jose/go-jose/v3/symmetric.go | 15 +-
 vendor/github.com/go-openapi/strfmt/format.go | 3 +-
 vendor/github.com/google/uuid/CHANGELOG.md | 13 +
 vendor/github.com/google/uuid/hash.go | 6 +
 vendor/github.com/google/uuid/version7.go | 39 +-
 .../github.com/klauspost/compress/README.md | 10 +-
 .../runc => moby/sys/user}/LICENSE | 13 +-
 .../sys}/user/lookup_unix.go | 0
 .../libcontainer => moby/sys}/user/user.go | 0
 .../sys}/user/user_fuzzer.go | 0
 vendor/github.com/opencontainers/runc/NOTICE | 17 -
 .../runtime-spec/specs-go/config.go | 10 +
 .../runtime-spec/specs-go/version.go | 2 +-
 .../sigstore/pkg/oauth/interactive.go | 114 +++-
 .../pkg/oauthflow/client_credentials.go | 156 +++++
 .../sigstore/sigstore/pkg/oauthflow/flow.go | 12 +
 .../sigstore/pkg/signature/ed25519ph.go | 211 ++++++
 .../sigstore/pkg/signature/options.go | 8 +
 .../pkg/signature/options/loadoptions.go | 76 +++
 .../sigstore/pkg/signature/options/noop.go | 10 +
 .../sigstore/sigstore/pkg/signature/signer.go | 36 +
 .../sigstore/pkg/signature/signerverifier.go | 36 +
 .../sigstore/pkg/signature/verifier.go | 38 ++
 .../testify/assert/assertion_compare.go | 28 +-
 .../assert/assertion_compare_can_convert.go | 16 -
 .../assert/assertion_compare_legacy.go | 16 -
 .../testify/assert/assertion_format.go | 32 +-
 .../testify/assert/assertion_forward.go | 59 +-
 .../stretchr/testify/assert/assertions.go | 207 +++---
 .../testify/assert/http_assertions.go | 27 +-
 .../stretchr/testify/require/require.go | 65 +-
 .../testify/require/require_forward.go | 59 +-
 .../stretchr/testify/suite/suite.go | 31 +-
 .../bson/bsoncodec/array_codec.go | 7 +-
 .../bson/bsoncodec/byte_slice_codec.go | 25 +-
 .../bson/bsoncodec/default_value_decoders.go | 10 +-
 .../bson/bsoncodec/default_value_encoders.go | 12 +-
 .../bson/bsoncodec/empty_interface_codec.go | 24 +-
 .../mongo-driver/bson/bsoncodec/map_codec.go | 38 +-
 .../bson/bsoncodec/pointer_codec.go | 16 +-
 .../bson/bsoncodec/slice_codec.go | 27 +-
 .../bson/bsoncodec/string_codec.go | 16 +-
 .../bson/bsoncodec/struct_codec.go | 62 +-
 .../mongo-driver/bson/bsoncodec/time_codec.go | 24 +-
 .../mongo-driver/bson/bsoncodec/uint_codec.go | 24 +-
 .../mongo-driver/bson/bsonrw/copier.go | 5 +-
 .../bson/bsonrw/extjson_parser.go | 2 +-
 .../bson/bsonrw/extjson_reader.go | 5 +-
 .../mongo-driver/bson/bsonrw/json_scanner.go | 26 +-
 .../go.mongodb.org/mongo-driver/bson/doc.go | 83 ++-
 .../mongo-driver/bson/primitive/decimal.go | 3 -
 .../mongo-driver/bson/primitive/objectid.go | 4 +-
 .../x/crypto/internal/poly1305/sum_ppc64le.s | 14 +-
 vendor/golang.org/x/crypto/ocsp/ocsp.go | 13 +-
 vendor/golang.org/x/net/http2/frame.go | 11 +-
 vendor/golang.org/x/tools/go/packages/doc.go | 40 +-
 .../x/tools/go/packages/external.go | 77 ++-
 .../golang.org/x/tools/go/packages/golist.go | 35 +-
 .../x/tools/go/packages/packages.go | 46 +-
 .../x/tools/internal/gcimporter/iimport.go | 7 +
 vendor/modules.txt | 65 +-
 133 files changed, 3431 insertions(+), 1463 deletions(-)
 delete mode 100644 vendor/github.com/containerd/containerd/NOTICE
 rename vendor/github.com/containerd/{containerd => errdefs}/LICENSE (100%)
 create mode 100644 vendor/github.com/containerd/errdefs/README.md
 rename vendor/github.com/containerd/{containerd => }/errdefs/errors.go (100%)
 rename vendor/github.com/containerd/{containerd => }/errdefs/grpc.go (100%)
 create mode 100644 vendor/github.com/containers/storage/pkg/fsverity/fsverity_linux.go
 create mode 100644 vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go
 delete mode 100644 vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/SECURITY.md
 rename vendor/github.com/{opencontainers/runc => moby/sys/user}/LICENSE (94%)
 rename vendor/github.com/{opencontainers/runc/libcontainer => moby/sys}/user/lookup_unix.go (100%)
 rename vendor/github.com/{opencontainers/runc/libcontainer => moby/sys}/user/user.go (100%)
 rename vendor/github.com/{opencontainers/runc/libcontainer => moby/sys}/user/user_fuzzer.go (100%)
 delete mode 100644 vendor/github.com/opencontainers/runc/NOTICE
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/oauthflow/client_credentials.go
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go
 delete mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
 delete mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go

diff --git a/go.mod b/go.mod
index 17f73902..a18342f8 100644
--- a/go.mod
+++ b/go.mod
@@ -5,9 +5,9 @@ go 1.19
 require (
 	github.com/Masterminds/semver/v3 v3.2.1
 	github.com/containers/common v0.57.4
-	github.com/containers/image/v5 v5.29.3-0.20240207231441-93b4b55d865b
+	github.com/containers/image/v5 v5.29.3-0.20240301163503-faa4f4fd0e4f
 	github.com/containers/ocicrypt v1.1.9
-	github.com/containers/storage v1.52.0
+	github.com/containers/storage v1.52.1-0.20240301111729-226cffb1c4d2
 	github.com/docker/distribution v2.8.3+incompatible
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.0
@@ -15,9 +15,9 @@ require (
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.8.0
 	github.com/spf13/pflag v1.0.5
-	github.com/stretchr/testify v1.8.4
+	github.com/stretchr/testify v1.9.0
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
-	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a
+	golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
 	golang.org/x/term v0.17.0
 	gopkg.in/yaml.v3 v3.0.1
 )
@@ -26,12 +26,12 @@ require (
 	dario.cat/mergo v1.0.0 // indirect
 	github.com/BurntSushi/toml v1.3.2 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
-	github.com/Microsoft/hcsshim v0.12.0-rc.2 // indirect
+	github.com/Microsoft/hcsshim v0.12.0-rc.3 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/containerd/cgroups/v3 v3.0.2 // indirect
-	github.com/containerd/containerd v1.7.12 // indirect
+	github.com/containerd/errdefs v0.1.0 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
 	github.com/coreos/go-oidc/v3 v3.9.0 // indirect
@@ -45,7 +45,7 @@ require (
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
-	github.com/go-jose/go-jose/v3 v3.0.1 // indirect
+	github.com/go-jose/go-jose/v3 v3.0.2 // indirect
 	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/analysis v0.21.4 // indirect
@@ -55,15 +55,15 @@ require (
 	github.com/go-openapi/loads v0.21.2 // indirect
 	github.com/go-openapi/runtime v0.26.0 // indirect
 	github.com/go-openapi/spec v0.20.9 // indirect
-	github.com/go-openapi/strfmt v0.22.0 // indirect
+	github.com/go-openapi/strfmt v0.22.1 // indirect
 	github.com/go-openapi/swag v0.22.9 // indirect
 	github.com/go-openapi/validate v0.22.1 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
-	github.com/google/go-containerregistry v0.17.0 // indirect
+	github.com/google/go-containerregistry v0.19.0 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
-	github.com/google/uuid v1.5.0 // indirect
+	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/mux v1.8.1 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -72,8 +72,9 @@ require (
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.6 // indirect
+	github.com/klauspost/compress v1.17.7 // indirect
 	github.com/klauspost/pgzip v1.2.6 // indirect
+	github.com/kr/pretty v0.3.1 // indirect
 	github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-runewidth v0.0.15 // indirect
@@ -83,11 +84,11 @@ require (
 	github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/moby/sys/mountinfo v0.7.1 // indirect
+	github.com/moby/sys/user v0.1.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
-	github.com/opencontainers/runc v1.1.12 // indirect
-	github.com/opencontainers/runtime-spec v1.1.0 // indirect
+	github.com/opencontainers/runtime-spec v1.2.0 // indirect
 	github.com/opencontainers/selinux v1.11.0 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
 	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
@@ -98,9 +99,10 @@ require (
 	github.com/russross/blackfriday v2.0.0+incompatible // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
 	github.com/segmentio/ksuid v1.0.4 // indirect
+	github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
 	github.com/sigstore/fulcio v1.4.3 // indirect
 	github.com/sigstore/rekor v1.2.2 // indirect
-	github.com/sigstore/sigstore v1.8.1 // indirect
+	github.com/sigstore/sigstore v1.8.2 // indirect
 	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
 	github.com/sylabs/sif/v2 v2.15.1 // indirect
@@ -112,23 +114,25 @@ require (
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
-	go.mongodb.org/mongo-driver v1.13.1 // indirect
+	go.mongodb.org/mongo-driver v1.14.0 // indirect
 	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 	go.opencensus.io v0.24.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
 	go.opentelemetry.io/otel v1.19.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect
 	go.opentelemetry.io/otel/metric v1.19.0 // indirect
 	go.opentelemetry.io/otel/trace v1.19.0 // indirect
-	golang.org/x/crypto v0.19.0 // indirect
-	golang.org/x/mod v0.14.0 // indirect
-	golang.org/x/net v0.20.0 // indirect
-	golang.org/x/oauth2 v0.16.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.0.0 // indirect
+	golang.org/x/crypto v0.20.0 // indirect
+	golang.org/x/mod v0.15.0 // indirect
+	golang.org/x/net v0.21.0 // indirect
+	golang.org/x/oauth2 v0.17.0 // indirect
 	golang.org/x/sync v0.6.0 // indirect
 	golang.org/x/sys v0.17.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
-	golang.org/x/tools v0.17.0 // indirect
+	golang.org/x/tools v0.18.0 // indirect
 	google.golang.org/appengine v1.6.8 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect
 	google.golang.org/grpc v1.59.0 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
diff --git a/go.sum b/go.sum
index 818d9e08..28614437 100644
--- a/go.sum
+++ b/go.sum
@@ -10,8 +10,8 @@ github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0
 github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
 github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
 github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/Microsoft/hcsshim v0.12.0-rc.2 h1:gfKebjq3Mq17Ys+4cjE8vc2h6tZVeqCGb9a7vBVqpAk=
-github.com/Microsoft/hcsshim v0.12.0-rc.2/go.mod h1:G2TZhBED5frlh/hsuxV5CDh/ylkSFknPAMPpQg9owQw=
+github.com/Microsoft/hcsshim v0.12.0-rc.3 h1:5GNGrobGs/sN/0nFO21W9k4lFn+iXXZAE8fCZbmdRak=
+github.com/Microsoft/hcsshim v0.12.0-rc.3/go.mod h1:WuNfcaYNaw+KpCEsZCIM6HCEmu0c5HfXpi+dDSmveP0=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
@@ -29,21 +29,21 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
 github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
-github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0=
-github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk=
+github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
+github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
 github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
 github.com/containers/common v0.57.4 h1:kmfBad92kUjP5X44BPpOwMe+eZQqaKETfS+ASeL0g+g=
 github.com/containers/common v0.57.4/go.mod h1:o3L3CyOI9yr+JC8l4dZgvqTxcjs3qdKmkek00uchgvw=
-github.com/containers/image/v5 v5.29.3-0.20240207231441-93b4b55d865b h1:SDiNbvEwiBB0TdoIBkfkY39FD1c2jzL3aJIXRsSr0oA=
-github.com/containers/image/v5 v5.29.3-0.20240207231441-93b4b55d865b/go.mod h1:hD2xTPHZ/QaNHhxx92ysHfy8OlrakkfkHQkaz4tD7Gs=
+github.com/containers/image/v5 v5.29.3-0.20240301163503-faa4f4fd0e4f h1:iJOH809/o1+PGUa+WlkOzC17o2YgXZwylsG6W37Rz2A=
+github.com/containers/image/v5 v5.29.3-0.20240301163503-faa4f4fd0e4f/go.mod h1:lXWDxGqOqfwx8XaHyVM2BKv3Y99qTlM7dYAD/P45Mh4=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOjIdEM=
 github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys=
-github.com/containers/storage v1.52.0 h1:8QFFeJg2cQFN0TyJguxHrSz3bl7XtMRnfXrTsvLVkuY=
-github.com/containers/storage v1.52.0/go.mod h1:PE+L330tisEjQrAVkfAlW8ECvqzc/jusrxJzu9TEi2w=
+github.com/containers/storage v1.52.1-0.20240301111729-226cffb1c4d2 h1:9+rUow7EtVgtlcn8oFValad/M548QnSpSCSBctduPbw=
+github.com/containers/storage v1.52.1-0.20240301111729-226cffb1c4d2/go.mod h1:mFA6QpUoT9qTa3q2DD1CvSo3Az3syNkw1P9X+4nUYdY=
 github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo=
 github.com/coreos/go-oidc/v3 v3.9.0/go.mod h1:rTKz2PYwftcrtoCzV5g5kvfJoWcm0Mk8AF8y1iAQro4=
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
@@ -79,8 +79,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
 github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
-github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
+github.com/go-jose/go-jose/v3 v3.0.2 h1:2Edjn8Nrb44UvTdp84KU0bBPs1cO7noRCybtS3eJEUQ=
+github.com/go-jose/go-jose/v3 v3.0.2/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
 github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -114,8 +114,8 @@ github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6
 github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
 github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
 github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
-github.com/go-openapi/strfmt v0.22.0 h1:Ew9PnEYc246TwrEspvBdDHS4BVKXy/AOVsfqGDgAcaI=
-github.com/go-openapi/strfmt v0.22.0/go.mod h1:HzJ9kokGIju3/K6ap8jL+OlGAbjpSv27135Yr9OivU4=
+github.com/go-openapi/strfmt v0.22.1 h1:5Ky8cybT4576C6Ffc+8gYji/wRXCo6Ozm8RaWjPI6jc=
+github.com/go-openapi/strfmt v0.22.1/go.mod h1:OfVoytIXJasDkkGvkb1Cceb3BPyMOwk1FgmyyEw7NYg=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
@@ -124,7 +124,7 @@ github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZC
 github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
 github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
 github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
-github.com/go-rod/rod v0.114.5 h1:1x6oqnslwFVuXJbJifgxspJUd3O4ntaGhRLHt+4Er9c=
+github.com/go-rod/rod v0.114.7 h1:h4pimzSOUnw7Eo41zdJA788XsawzHjJMyzCE3BrBww0=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
 github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
@@ -181,9 +181,10 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-containerregistry v0.17.0 h1:5p+zYs/R4VGHkhyvgWurWrpJ2hW4Vv9fQI+GzdcwXLk=
-github.com/google/go-containerregistry v0.17.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
+github.com/google/go-containerregistry v0.19.0 h1:uIsMRBV7m/HDkDxE/nXMnv1q+lOOSPlQ/ywc5JbB8Ic=
+github.com/google/go-containerregistry v0.19.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
 github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
 github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -191,8 +192,8 @@ github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBx
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
-github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
 github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk=
@@ -222,8 +223,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI=
-github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
+github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
 github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -231,7 +232,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -264,6 +266,8 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
 github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
+github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
 github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
@@ -283,10 +287,8 @@ github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQ
 github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
 github.com/opencontainers/image-tools v1.0.0-rc3 h1:ZR837lBIxq6mmwEqfYrbLMuf75eBSHhccVHy6lsBeM4=
 github.com/opencontainers/image-tools v1.0.0-rc3/go.mod h1:A9btVpZLzttF4iFaKNychhPyrhfOjJ1OF5KrA8GcLj4=
-github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss=
-github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8=
-github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
-github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
+github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
 github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
@@ -295,6 +297,7 @@ github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqw
 github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
 github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
 github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -314,6 +317,7 @@ github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc
 github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
 github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
 github.com/russross/blackfriday v2.0.0+incompatible h1:cBXrhZNUf9C+La9/YpS+UHpUT8YD6Td9ZMSU9APFcsk=
 github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
@@ -325,12 +329,13 @@ github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c
 github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
 github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
 github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ=
 github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og=
 github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY=
 github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg=
-github.com/sigstore/sigstore v1.8.1 h1:mAVposMb14oplk2h/bayPmIVdzbq2IhCgy4g6R0ZSjo=
-github.com/sigstore/sigstore v1.8.1/go.mod h1:02SL1158BSj15bZyOFz7m+/nJzLZfFd9A8ab3Kz7w/E=
+github.com/sigstore/sigstore v1.8.2 h1:0Ttjcn3V0fVQXlYq7+oHaaHkGFIt3ywm7SF4JTU/l8c=
+github.com/sigstore/sigstore v1.8.2/go.mod h1:CHVcSyknCcjI4K2ZhS1SI28r0tcQyBlwtALG536x1DY=
 github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -357,8 +362,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/sylabs/sif/v2 v2.15.1 h1:75BcunPOY11fVhe02/WHuNLTfDd3OHH0ex0MuuNMYX0=
 github.com/sylabs/sif/v2 v2.15.1/go.mod h1:YiwCUdZOhiohnPbyxuxvCZa+03HwAaiC+vfAKZPR8nQ=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
@@ -378,10 +383,8 @@ github.com/vbauerster/mpb/v8 v8.7.2/go.mod h1:ZFnrjzspgDHoxYLGvxIruiNk73GNTPG4YH
 github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
 github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
 github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
-github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
 github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
 github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
-github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -401,8 +404,8 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t
 go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
 go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
 go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
-go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk=
-go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo=
+go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
+go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
 go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak=
 go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
@@ -412,6 +415,7 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:
 go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
 go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
 go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
 go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
@@ -419,29 +423,31 @@ go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJ
 go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
 go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
 go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
+go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
 go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
 golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg=
+golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
-golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
+golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
+golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
-golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -455,11 +461,13 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
 golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
-golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
-golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
+golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
+golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -469,6 +477,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
 golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -487,10 +496,14 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
 golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
 golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
 golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -500,6 +513,7 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
@@ -516,8 +530,9 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
-golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
+golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -529,10 +544,10 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb h1:XFBgcDwm7irdHTbz4Zk2h7Mh+eis4nfJEFQFYzJzuIA=
+google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13 h1:vlzZttNJGVqTsRFU9AmdnrcO1Znh8Ew9kCD//yjigk0=
 google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb h1:lK0oleSc7IQsUxO3U5TjL9DWlsxpEBemh+zpB7IqhWI=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile
index d8eb30b8..de643589 100644
--- a/vendor/github.com/Microsoft/hcsshim/Makefile
+++ b/vendor/github.com/Microsoft/hcsshim/Makefile
@@ -29,12 +29,23 @@ ifeq "$(DEV_BUILD)" "1"
 DELTA_TARGET=out/delta-dev.tar.gz
 endif
 
+ifeq "$(SNP_BUILD)" "1"
+DELTA_TARGET=out/delta-snp.tar.gz
+endif
+
 # The link aliases for gcstools
 GCS_TOOLS=\
 	generichook \
 	install-drivers
 
-.PHONY: all always rootfs test
+# Common path prefix.
+PATH_PREFIX:=
+# These have PATH_PREFIX prepended to obtain the full path in recipes, e.g. $(PATH_PREFIX)/$(VMGS_TOOL)
+VMGS_TOOL:=
+IGVM_TOOL:=
+KERNEL_PATH:=
+
+.PHONY: all always rootfs test snp simple
 
 .DEFAULT_GOAL := all
 
@@ -49,9 +60,58 @@ test:
 
 rootfs: out/rootfs.vhd
 
-out/rootfs.vhd: out/rootfs.tar.gz bin/cmd/tar2ext4
+snp: out/kernelinitrd.vmgs out/rootfs.hash.vhd out/rootfs.vhd out/v2056.vmgs
+
+simple: out/simple.vmgs snp
+
+%.vmgs: %.bin
+	rm -f $@
+	# du -BM returns the size of the bin file in M, e.g. 7M. The sed command replaces the M with *1024*1024 and then bc does the math to convert to bytes
+	$(PATH_PREFIX)/$(VMGS_TOOL) create --filepath $@ --filesize `du -BM $< | sed "s/M.*/*1024*1024/" | bc`
+	$(PATH_PREFIX)/$(VMGS_TOOL) write --filepath $@ --datapath $< -i=8
+
+# Simplest debug UVM used to test changes to the Linux kernel. No dm-verity protection. Boots an initrd rather than directly booting a vhd disk.
+out/simple.bin: out/initrd.img $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup_simple.sh
+	rm -f $@
+	python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 rdinit=/startup_simple.sh" -rdinit out/initrd.img -vtl 0
+
+ROOTFS_DEVICE:=/dev/sda
+VERITY_DEVICE:=/dev/sdb
+# Debug build for use with uvmtester. UVM with dm-verity protected vhd disk mounted directly via the kernel command line. Ignores corruption in dm-verity protected disk. (Use dmesg to see if dm-verity is ignoring data corruption.)
+out/v2056.bin: out/rootfs.vhd out/rootfs.hash.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh
+	rm -f $@
+	python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" -vtl 0
+
+# Full UVM with dm-verity protected vhd disk mounted directly via the kernel command line.
+out/kernelinitrd.bin: out/rootfs.vhd out/rootfs.hash.vhd out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup.sh
+	rm -f $@
+	python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" init=/startup.sh" -vtl 0
+
+# Rule to make a vhd from a file. This is used to create the rootfs.hash.vhd from rootfs.hash.
+%.vhd: % bin/cmd/tar2ext4
+	./bin/cmd/tar2ext4 -only-vhd -i $< -o $@
+
+# Rule to make a vhd from an ext4 file. This is used to create the rootfs.vhd from rootfs.ext4.
+%.vhd: %.ext4 bin/cmd/tar2ext4
+	./bin/cmd/tar2ext4 -only-vhd -i $< -o $@
+
+%.hash %.hash.info %.hash.datablocks %.hash.rootdigest %.hash.datablocksize %.hash.datasectors %.hash.hashblocksize: %.ext4 %.hash.salt
+	veritysetup format --no-superblock --salt $(shell cat out/rootfs.hash.salt) $< $*.hash > $*.hash.info
+	# Retrieve info required by dm-verity at boot time
+	# Get the blocksize of rootfs
+	cat $*.hash.info | awk '/^Root hash:/{ print $$3 }' > $*.hash.rootdigest
+	cat $*.hash.info | awk '/^Salt:/{ print $$2 }' > $*.hash.salt
+	cat $*.hash.info | awk '/^Data block size:/{ print $$4 }' > $*.hash.datablocksize
+	cat $*.hash.info | awk '/^Hash block size:/{ print $$4 }' > $*.hash.hashblocksize
+	cat $*.hash.info | awk '/^Data blocks:/{ print $$3 }' > $*.hash.datablocks
+	echo $$(( $$(cat $*.hash.datablocks) * $$(cat $*.hash.datablocksize) / 512 )) > $*.hash.datasectors
+
+out/rootfs.hash.salt:
+	hexdump -vn32 -e'8/4 "%08X" 1 "\n"' /dev/random > $@
+
+out/rootfs.ext4: out/rootfs.tar.gz bin/cmd/tar2ext4
 	gzip -f -d ./out/rootfs.tar.gz
-	bin/cmd/tar2ext4 -vhd -i ./out/rootfs.tar -o $@
+	./bin/cmd/tar2ext4 -i ./out/rootfs.tar -o $@
 
 out/rootfs.tar.gz: out/initrd.img
 	rm -rf rootfs-conv
@@ -74,6 +134,20 @@ out/delta-dev.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report
 	tar -zcf $@ -C rootfs-dev .
 	rm -rf rootfs-dev
 
+out/delta-snp.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report boot/startup_v2056.sh boot/startup_simple.sh boot/startup.sh
+	rm -rf rootfs-snp
+	mkdir rootfs-snp
+	tar -xzf out/delta.tar.gz -C rootfs-snp
+	cp boot/startup_v2056.sh rootfs-snp/startup_v2056.sh
+	cp boot/startup_simple.sh rootfs-snp/startup_simple.sh
+	cp boot/startup.sh rootfs-snp/startup.sh
+	cp bin/internal/tools/snp-report rootfs-snp/bin/
+	chmod a+x rootfs-snp/startup_v2056.sh
+	chmod a+x rootfs-snp/startup_simple.sh
+	chmod a+x rootfs-snp/startup.sh
+	tar -zcf $@ -C rootfs-snp .
+	rm -rf rootfs-snp
+
 out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths Makefile
 	@mkdir -p out
 	rm -rf rootfs
@@ -94,7 +168,10 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho
 	tar -zcf $@ -C rootfs .
 	rm -rf rootfs
 
-bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report:
+out/containerd-shim-runhcs-v1.exe:
+	GOOS=windows $(GO_BUILD) -o $@ $(SRCROOT)/cmd/containerd-shim-runhcs-v1
+
+bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report bin/cmd/dmverity-vhd:
 	@mkdir -p $(dir $@)
 	GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%)
 
@@ -108,4 +185,4 @@ bin/init: init/init.o vsockexec/vsock.o
 %.o: %.c
 	@mkdir -p $(dir $@)
-	$(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
+	$(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
\ No newline at end of file
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go
index 71df25b8..8c41a366 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go
@@ -6,7 +6,7 @@ import (
 	"net"
 	"os"
 
-	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/errdefs"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
@@ -16,7 +16,7 @@ import (
 func toStatusCode(err error) codes.Code {
 	// checks if err implements GRPCStatus() *"google.golang.org/grpc/status".Status,
-	// wraps an error defined in "github.com/containerd/containerd/errdefs", or is a
+	// wraps an error defined in "github.com/containerd/errdefs", or is a
 	// context timeout or cancelled error
 	if s, ok := status.FromError(errdefs.ToGRPC(err)); ok {
 		return s.Code()
diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE
deleted file mode 100644
index 8915f027..00000000
--- a/vendor/github.com/containerd/containerd/NOTICE
+++ /dev/null
@@ -1,16 +0,0 @@
-Docker
-Copyright 2012-2015 Docker, Inc.
-
-This product includes software developed at Docker, Inc. (https://www.docker.com).
-
-The following is courtesy of our legal counsel:
-
-
-Use and transfer of Docker may be subject to certain restrictions by the
-United States and other governments.
-It is your responsibility to ensure that your use and/or transfer does not
-violate applicable laws.
-
-For more information, please see https://www.bis.doc.gov
-
-See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/containerd/containerd/LICENSE b/vendor/github.com/containerd/errdefs/LICENSE
similarity index 100%
rename from vendor/github.com/containerd/containerd/LICENSE
rename to vendor/github.com/containerd/errdefs/LICENSE
diff --git a/vendor/github.com/containerd/errdefs/README.md b/vendor/github.com/containerd/errdefs/README.md
new file mode 100644
index 00000000..bd418c63
--- /dev/null
+++ b/vendor/github.com/containerd/errdefs/README.md
@@ -0,0 +1,13 @@
+# errdefs
+
+A Go package for defining and checking common containerd errors.
+
+## Project details
+
+**errdefs** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd sub-project, you will find the:
+ * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
+
+information in our [`containerd/project`](https://github.com/containerd/project) repository.
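The errdefs extraction above is an import-path move only: errors.go and grpc.go are renamed 100% unchanged (see the renames that follow), so the sentinel errors and predicates keep their names. A minimal, hypothetical sketch of consumer code after the switch — not part of this patch:

    package main

    import (
    	"fmt"

    	"github.com/containerd/errdefs" // previously "github.com/containerd/containerd/errdefs"
    )

    func main() {
    	// Stand-in for an error returned by a containerd-facing API.
    	err := fmt.Errorf("pulling image: %w", errdefs.ErrNotFound)

    	// The predicates unwrap with errors.Is, so wrapped errors still match.
    	if errdefs.IsNotFound(err) {
    		fmt.Println("treat as a cache miss")
    	}
    }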
diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/errdefs/errors.go
similarity index 100%
rename from vendor/github.com/containerd/containerd/errdefs/errors.go
rename to vendor/github.com/containerd/errdefs/errors.go
diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/errdefs/grpc.go
similarity index 100%
rename from vendor/github.com/containerd/containerd/errdefs/grpc.go
rename to vendor/github.com/containerd/errdefs/grpc.go
diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go
index 22dc432b..1706f711 100644
--- a/vendor/github.com/containers/image/v5/copy/compression.go
+++ b/vendor/github.com/containers/image/v5/copy/compression.go
@@ -23,9 +23,9 @@ var (
 	// compressionBufferSize is the buffer size used to compress a blob
 	compressionBufferSize = 1048576
 
-	// expectedCompressionFormats is used to check if a blob with a specified media type is compressed
+	// expectedBaseCompressionFormats is used to check if a blob with a specified media type is compressed
 	// using the algorithm that the media type says it should be compressed with
-	expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
+	expectedBaseCompressionFormats = map[string]*compressiontypes.Algorithm{
 		imgspecv1.MediaTypeImageLayerGzip:      &compression.Gzip,
 		imgspecv1.MediaTypeImageLayerZstd:      &compression.Zstd,
 		manifest.DockerV2Schema2LayerMediaType: &compression.Gzip,
@@ -62,8 +62,8 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
 		res.srcCompressorName = internalblobinfocache.Uncompressed
 	}
 
-	if expectedFormat, known := expectedCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.Name() != expectedFormat.Name() {
-		logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedFormat.Name(), format.Name())
+	if expectedBaseFormat, known := expectedBaseCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.BaseVariantName() != expectedBaseFormat.Name() {
+		logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedBaseFormat.Name(), format.Name())
 	}
 	return res, nil
 }
@@ -172,7 +172,8 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp
 // bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so.
 func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
 	if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed &&
-		ic.compressionFormat != nil && ic.compressionFormat.Name() != detected.format.Name() {
+		ic.compressionFormat != nil &&
+		(ic.compressionFormat.Name() != detected.format.Name() && ic.compressionFormat.Name() != detected.format.BaseVariantName()) {
 		// When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
 		// re-compressed using the desired format.
 		logrus.Debugf("Blob will be converted")
diff --git a/vendor/github.com/containers/image/v5/copy/digesting_reader.go b/vendor/github.com/containers/image/v5/copy/digesting_reader.go
index 901d1082..4c6ba82e 100644
--- a/vendor/github.com/containers/image/v5/copy/digesting_reader.go
+++ b/vendor/github.com/containers/image/v5/copy/digesting_reader.go
@@ -23,11 +23,11 @@ type digestingReader struct {
 func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
 	var digester digest.Digester
 	if err := expectedDigest.Validate(); err != nil {
-		return nil, fmt.Errorf("Invalid digest specification %s", expectedDigest)
+		return nil, fmt.Errorf("invalid digest specification %q: %w", expectedDigest, err)
 	}
 	digestAlgorithm := expectedDigest.Algorithm()
 	if !digestAlgorithm.Available() {
-		return nil, fmt.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
+		return nil, fmt.Errorf("invalid digest specification %q: unsupported digest algorithm %q", expectedDigest, digestAlgorithm)
 	}
 	digester = digestAlgorithm.Digester()
diff --git a/vendor/github.com/containers/image/v5/copy/multiple.go b/vendor/github.com/containers/image/v5/copy/multiple.go
index f252e347..a219b58b 100644
--- a/vendor/github.com/containers/image/v5/copy/multiple.go
+++ b/vendor/github.com/containers/image/v5/copy/multiple.go
@@ -38,6 +38,7 @@ type instanceCopy struct {
 
 	// Fields which can be used by callers when operation
 	// is `instanceCopyClone`
+	cloneArtifactType       string
 	cloneCompressionVariant OptionCompressionVariant
 	clonePlatform           *imgspecv1.Platform
 	cloneAnnotations        map[string]string
@@ -142,6 +143,7 @@ func prepareInstanceCopies(list internalManifest.List, instanceDigests []digest.
 			res = append(res, instanceCopy{
 				op:                      instanceCopyClone,
 				sourceDigest:            instanceDigest,
+				cloneArtifactType:       instanceDetails.ReadOnly.ArtifactType,
 				cloneCompressionVariant: compressionVariant,
 				clonePlatform:           instanceDetails.ReadOnly.Platform,
 				cloneAnnotations:        maps.Clone(instanceDetails.ReadOnly.Annotations),
@@ -268,6 +270,7 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte,
 				AddDigest:                updated.manifestDigest,
 				AddSize:                  int64(len(updated.manifest)),
 				AddMediaType:             updated.manifestMIMEType,
+				AddArtifactType:          instance.cloneArtifactType,
 				AddPlatform:              instance.clonePlatform,
 				AddAnnotations:           instance.cloneAnnotations,
 				AddCompressionAlgorithms: updated.compressionAlgorithms,
diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go
index ce078234..3ac93a9d 100644
--- a/vendor/github.com/containers/image/v5/copy/progress_bars.go
+++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"time"
 
 	"github.com/containers/image/v5/internal/private"
 	"github.com/containers/image/v5/types"
@@ -148,13 +149,14 @@ type blobChunkAccessorProxy struct {
 // The readers must be fully consumed, in the order they are returned, before blocking
 // to read the next chunk.
 func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+	start := time.Now()
 	rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks)
 	if err == nil {
 		total := int64(0)
 		for _, c := range chunks {
 			total += int64(c.Length)
 		}
-		s.bar.IncrInt64(total)
+		s.bar.EwmaIncrInt64(total, time.Since(start))
 	}
 	return rc, errs, err
 }
diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go
index 9e892943..e1a43f75 100644
--- a/vendor/github.com/containers/image/v5/copy/single.go
+++ b/vendor/github.com/containers/image/v5/copy/single.go
@@ -383,7 +383,11 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context,
 
 	compressionAlgos := set.New[string]()
 	for _, srcInfo := range ic.src.LayerInfos() {
-		if _, c := compressionEditsFromMIMEType(srcInfo); c != nil {
+		_, c, err := compressionEditsFromBlobInfo(srcInfo)
+		if err != nil {
+			return nil, err
+		}
+		if c != nil {
 			compressionAlgos.Add(c.Name())
 		}
 	}
@@ -636,21 +640,28 @@ type diffIDResult struct {
 	err    error
 }
 
-// compressionEditsFromMIMEType returns a (CompressionOperation, CompressionAlgorithm) value pair suitable
-// for types.BlobInfo, based on a MIME type of srcInfo.
-func compressionEditsFromMIMEType(srcInfo types.BlobInfo) (types.LayerCompression, *compressiontypes.Algorithm) {
+// compressionEditsFromBlobInfo returns a (CompressionOperation, CompressionAlgorithm) value pair suitable
+// for types.BlobInfo.
+func compressionEditsFromBlobInfo(srcInfo types.BlobInfo) (types.LayerCompression, *compressiontypes.Algorithm, error) {
 	// This MIME type → compression mapping belongs in manifest-specific code in our manifest
 	// package (but we should preferably replace/change UpdatedImage instead of productizing
 	// this workaround).
 	switch srcInfo.MediaType {
 	case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
-		return types.PreserveOriginal, &compression.Gzip
+		return types.PreserveOriginal, &compression.Gzip, nil
 	case imgspecv1.MediaTypeImageLayerZstd:
-		return types.PreserveOriginal, &compression.Zstd
+		tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
+		if err != nil {
+			return types.PreserveOriginal, nil, err
+		}
+		if tocDigest != nil {
+			return types.PreserveOriginal, &compression.ZstdChunked, nil
+		}
+		return types.PreserveOriginal, &compression.Zstd, nil
	case manifest.DockerV2SchemaLayerMediaTypeUncompressed, imgspecv1.MediaTypeImageLayer:
-		return types.Decompress, nil
+		return types.Decompress, nil, nil
 	default:
-		return types.PreserveOriginal, nil
+		return types.PreserveOriginal, nil, nil
 	}
 }
 
@@ -666,7 +677,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 	// (Sadly UpdatedImage() is documented to not update MediaTypes from
 	// ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
if srcInfo.CompressionOperation == types.PreserveOriginal && srcInfo.CompressionAlgorithm == nil { - srcInfo.CompressionOperation, srcInfo.CompressionAlgorithm = compressionEditsFromMIMEType(srcInfo) + op, algo, err := compressionEditsFromBlobInfo(srcInfo) + if err != nil { + return types.BlobInfo{}, "", err + } + srcInfo.CompressionOperation = op + srcInfo.CompressionAlgorithm = algo } ic.c.printCopyInfo("blob", srcInfo) @@ -695,11 +711,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to requiredCompression = ic.compressionFormat } + var tocDigest digest.Digest + // Check if we have a chunked layer in storage that's based on that blob. These layers are stored by their TOC digest. - tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations) + d, err := chunkedToc.GetTOCDigest(srcInfo.Annotations) if err != nil { return types.BlobInfo{}, "", err } + if d != nil { + tocDigest = *d + } reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{ Cache: ic.c.blobInfoCache, @@ -718,7 +739,11 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to if reused { logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) func() { // A scope for defer - bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", "skipped: already exists") + label := "skipped: already exists" + if reusedBlob.MatchedByTOCDigest { + label = "skipped: already exists (found by TOC)" + } + bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", label) defer bar.Abort(false) bar.mark100PercentComplete() }() @@ -751,7 +776,10 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to wrapped: ic.c.rawSource, bar: bar, } - uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache) + uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, private.PutBlobPartialOptions{ + Cache: ic.c.blobInfoCache, + LayerIndex: layerIndex, + }) if err == nil { if srcInfo.Size != -1 { refill := srcInfo.Size - bar.Current() diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 6ce8f700..a60ec563 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -978,13 +978,10 @@ func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, t // This function can return nil reader when no url is supported by this function. In this case, the caller // should fallback to fetch the non-external blob (i.e. pull from the registry). func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { - var ( - resp *http.Response - err error - ) if len(urls) == 0 { return nil, 0, errors.New("internal error: getExternalBlob called with no URLs") } + var remoteErrors []error for _, u := range urls { blobURL, err := url.Parse(u) if err != nil || (blobURL.Scheme != "http" && blobURL.Scheme != "https") { @@ -993,24 +990,28 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R // NOTE: we must not authenticate on additional URLs as those // can be abused to leak credentials or tokens. Please // refer to CVE-2020-15157 for more information. 
- resp, err = c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil) - if err == nil { - if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode)) - logrus.Debug(err) - resp.Body.Close() - continue - } - break + resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil) + if err != nil { + remoteErrors = append(remoteErrors, err) + continue } + if resp.StatusCode != http.StatusOK { + err := fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode)) + remoteErrors = append(remoteErrors, err) + logrus.Debug(err) + resp.Body.Close() + continue + } + return resp.Body, getBlobSize(resp), nil } - if resp == nil && err == nil { + if remoteErrors == nil { return nil, 0, nil // fallback to non-external blob } - if err != nil { - return nil, 0, err + err := fmt.Errorf("failed fetching external blob from all urls: %w", remoteErrors[0]) + for _, e := range remoteErrors[1:] { + err = fmt.Errorf("%s, %w", err, e) } - return resp.Body, getBlobSize(resp), nil + return nil, 0, err } func getBlobSize(resp *http.Response) int64 { diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go index a8a9f51b..553569a0 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go @@ -17,7 +17,8 @@ func CandidateMatchesTryReusingBlobOptions(options private.TryReusingBlobOptions // The caller must re-compress to build those annotations. return false } - if candidateCompression == nil || (options.RequiredCompression.Name() != candidateCompression.Name()) { + if candidateCompression == nil || + (options.RequiredCompression.Name() != candidateCompression.Name() && options.RequiredCompression.Name() != candidateCompression.BaseVariantName()) { return false } } diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go index 0dc6bd5a..bbb53c19 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go @@ -4,7 +4,6 @@ import ( "context" "fmt" - "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/types" ) @@ -39,7 +38,7 @@ func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool { // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. 
-func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { +func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName) } diff --git a/vendor/github.com/containers/image/v5/internal/manifest/list.go b/vendor/github.com/containers/image/v5/internal/manifest/list.go index 189f1a71..1d60da75 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/list.go @@ -73,6 +73,7 @@ type ListUpdate struct { Platform *imgspecv1.Platform Annotations map[string]string CompressionAlgorithmNames []string + ArtifactType string } } @@ -101,6 +102,7 @@ type ListEdit struct { AddDigest digest.Digest AddSize int64 AddMediaType string + AddArtifactType string AddPlatform *imgspecv1.Platform AddAnnotations map[string]string AddCompressionAlgorithms []compression.Algorithm diff --git a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go index 6f7bc8bb..c77db752 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go @@ -169,7 +169,8 @@ func NormalizedMIMEType(input string) string { // CompressionAlgorithmIsUniversallySupported returns true if MIMETypeSupportsCompressionAlgorithm(mimeType, algo) returns true for all mimeType values. func CompressionAlgorithmIsUniversallySupported(algo compressiontypes.Algorithm) bool { - switch algo.Name() { // Should this use InternalUnstableUndocumentedMIMEQuestionMark() ? + // Compare the discussion about BaseVariantName in MIMETypeSupportsCompressionAlgorithm(). + switch algo.Name() { case compressiontypes.GzipAlgorithmName: return true default: @@ -182,7 +183,9 @@ func MIMETypeSupportsCompressionAlgorithm(mimeType string, algo compressiontypes if CompressionAlgorithmIsUniversallySupported(algo) { return true } - switch algo.Name() { // Should this use InternalUnstableUndocumentedMIMEQuestionMark() ? + // This does not use BaseVariantName: Plausibly a manifest format might support zstd but not have annotation fields. + // The logic might have to be more complex (and more ad-hoc) if more manifest formats, with more capabilities, emerge. 
+ switch algo.Name() { case compressiontypes.ZstdAlgorithmName, compressiontypes.ZstdChunkedAlgorithmName: return mimeType == imgspecv1.MediaTypeImageManifest default: // Includes Bzip2AlgorithmName and XzAlgorithmName, which are defined names but are not supported anywhere diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go index d8d06513..829852a8 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -61,6 +61,7 @@ func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate ret.ReadOnly.Platform = manifest.Platform ret.ReadOnly.Annotations = manifest.Annotations ret.ReadOnly.CompressionAlgorithmNames = annotationsToCompressionAlgorithmNames(manifest.Annotations) + ret.ReadOnly.ArtifactType = manifest.ArtifactType return ret, nil } } @@ -102,7 +103,7 @@ func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, an *annotationsMap = map[string]string{} } for _, algo := range compressionAlgorithms { - switch algo.Name() { + switch algo.BaseVariantName() { case compression.ZstdAlgorithmName: (*annotationsMap)[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue default: @@ -157,11 +158,13 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error { } addCompressionAnnotations(editInstance.AddCompressionAlgorithms, &annotations) addedEntries = append(addedEntries, imgspecv1.Descriptor{ - MediaType: editInstance.AddMediaType, - Size: editInstance.AddSize, - Digest: editInstance.AddDigest, - Platform: editInstance.AddPlatform, - Annotations: annotations}) + MediaType: editInstance.AddMediaType, + ArtifactType: editInstance.AddArtifactType, + Size: editInstance.AddSize, + Digest: editInstance.AddDigest, + Platform: editInstance.AddPlatform, + Annotations: annotations, + }) default: return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation) } @@ -299,12 +302,13 @@ func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotation platform = &platformCopy } m := imgspecv1.Descriptor{ - MediaType: component.MediaType, - Size: component.Size, - Digest: component.Digest, - URLs: slices.Clone(component.URLs), - Annotations: maps.Clone(component.Annotations), - Platform: platform, + MediaType: component.MediaType, + ArtifactType: component.ArtifactType, + Size: component.Size, + Digest: component.Digest, + URLs: slices.Clone(component.URLs), + Annotations: maps.Clone(component.Annotations), + Platform: platform, } index.Manifests[i] = m } diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go index 7037755b..562adbea 100644 --- a/vendor/github.com/containers/image/v5/internal/private/private.go +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -55,7 +55,7 @@ type ImageDestinationInternalOnly interface { // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. 
- PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (UploadedBlob, error) + PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, options PutBlobPartialOptions) (UploadedBlob, error) // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). @@ -100,6 +100,12 @@ type PutBlobOptions struct { LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. } +// PutBlobPartialOptions are used in PutBlobPartial. +type PutBlobPartialOptions struct { + Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update. + LayerIndex int // A zero-based index of the layer within the image (PutBlobPartial is only called with layer-like blobs, not configs) +} + // TryReusingBlobOptions are used in TryReusingBlobWithOptions. type TryReusingBlobOptions struct { Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update. @@ -118,7 +124,7 @@ type TryReusingBlobOptions struct { PossibleManifestFormats []string // A set of possible manifest formats; at least one should support the reused layer blob. RequiredCompression *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go OriginalCompression *compression.Algorithm // May be nil to indicate “uncompressed” or “unknown”. - TOCDigest *digest.Digest // If specified, the blob can be looked up in the destination also by its TOC digest. + TOCDigest digest.Digest // If specified, the blob can be looked up in the destination also by its TOC digest. } // ReusedBlob is information about a blob reused in a destination. @@ -130,6 +136,8 @@ type ReusedBlob struct { // a differently-compressed blob. CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A + + MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes. } // ImageSourceChunk is a portion of a blob. 
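One change above is easy to miss: TryReusingBlobOptions.TOCDigest turns from a *digest.Digest into a plain digest.Digest, so “unset” is now the empty string rather than nil (digest.Digest is a string type). A minimal sketch of the resulting pattern, mirroring the validation the storage destination performs later in this patch; tryReusingBlobOptions and checkTOC are illustrative stand-ins, not the real private API:

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// tryReusingBlobOptions mirrors only the field relevant to this hunk of
// private.TryReusingBlobOptions; the real struct has many more fields.
type tryReusingBlobOptions struct {
	TOCDigest digest.Digest // "" means “no TOC digest known”, replacing the old nil pointer
}

// checkTOC validates the TOC digest only when one was provided, the same
// sentinel check the new storage-destination code uses instead of a nil check.
func checkTOC(opts tryReusingBlobOptions) error {
	if opts.TOCDigest != "" {
		if err := opts.TOCDigest.Validate(); err != nil {
			return fmt.Errorf("invalid TOC digest: %w", err)
		}
	}
	return nil
}

func main() {
	fmt.Println(checkTOC(tryReusingBlobOptions{}))                       // <nil>: unset is fine
	fmt.Println(checkTOC(tryReusingBlobOptions{TOCDigest: "sha256:zz"})) // error: malformed digest
}
```

The companion addition, ReusedBlob.MatchedByTOCDigest, is purely informational: copy/single.go above uses it only to pick the “skipped: already exists (found by TOC)” progress-bar label.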
diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go index 1bdcf3d3..de462811 100644 --- a/vendor/github.com/containers/image/v5/manifest/common.go +++ b/vendor/github.com/containers/image/v5/manifest/common.go @@ -55,7 +55,7 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType if variants != nil { name := mtsUncompressed if algorithm != nil { - name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark() + name = algorithm.BaseVariantName() } if res, ok := variants[name]; ok { if res != mtsUnsupportedMIMEType { diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index 6d5acb45..548994ff 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -9,7 +9,6 @@ import ( compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" ociencspec "github.com/containers/ocicrypt/spec" - chunkedToc "github.com/containers/storage/pkg/chunked/toc" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -260,44 +259,9 @@ func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) { if err := m.Config.Digest.Validate(); err != nil { return "", err } - - // If there is any layer that is using partial content, we calculate the image ID - // in a different way since the diffID cannot be validated as for regular pulled images. - for _, layer := range m.Layers { - toc, err := chunkedToc.GetTOCDigest(layer.Annotations) - if err != nil { - return "", fmt.Errorf("error looking up annotation for layer %q: %w", layer.Digest, err) - } - if toc != nil { - return m.calculateImageIDForPartialImage(diffIDs) - } - } - return m.Config.Digest.Hex(), nil } -func (m *OCI1) calculateImageIDForPartialImage(diffIDs []digest.Digest) (string, error) { - newID := digest.Canonical.Digester() - for i, layer := range m.Layers { - diffID := diffIDs[i] - _, err := newID.Hash().Write([]byte(diffID.Hex())) - if err != nil { - return "", fmt.Errorf("error writing diffID %q: %w", diffID, err) - } - toc, err := chunkedToc.GetTOCDigest(layer.Annotations) - if err != nil { - return "", fmt.Errorf("error looking up annotation for layer %q: %w", layer.Digest, err) - } - if toc != nil { - _, err = newID.Hash().Write([]byte(toc.Hex())) - if err != nil { - return "", fmt.Errorf("error writing TOC %q: %w", toc, err) - } - } - } - return newID.Digest().Hex(), nil -} - // CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image // (and the code can handle that). 
// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go index 6ca618e3..a3eb5d7a 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go @@ -6,7 +6,6 @@ import ( "io" "os" - "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/imagedestination" "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/private" @@ -120,8 +119,8 @@ func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, str // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. -func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { - return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache) +func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { + return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, options) } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go index 50a5339e..656f4518 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go @@ -12,7 +12,6 @@ import ( "github.com/containers/image/v5/docker" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/imagedestination" "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/imagedestination/stubs" @@ -128,8 +127,8 @@ func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stre // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. 
-func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { - return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache) +func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { + return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, options) } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go index 4443dda7..b83a257e 100644 --- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go +++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go @@ -19,19 +19,19 @@ type Algorithm = types.Algorithm var ( // Gzip compression. - Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, types.GzipAlgorithmName, + Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, "", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor) // Bzip2 compression. - Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, types.Bzip2AlgorithmName, + Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, "", []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor) // Xz compression. - Xz = internal.NewAlgorithm(types.XzAlgorithmName, types.XzAlgorithmName, + Xz = internal.NewAlgorithm(types.XzAlgorithmName, "", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor) // Zstd compression. - Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName, + Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, "", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor) - // ZstdChunked is a Zstd compression with chunk metadta which allows random access to individual files. - ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */ + // ZstdChunked is a Zstd compression with chunk metadata which allows random access to individual files. + ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, nil, ZstdDecompressor, compressor.ZstdCompressor) compressionAlgorithms = map[string]Algorithm{ diff --git a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go index ba619be0..d6f85274 100644 --- a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go +++ b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go @@ -12,23 +12,28 @@ type DecompressorFunc func(io.Reader) (io.ReadCloser, error) // Algorithm is a compression algorithm that can be used for CompressStream. type Algorithm struct { - name string - mime string - prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection. - decompressor DecompressorFunc - compressor CompressorFunc + name string + baseVariantName string + prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection. 
+ decompressor DecompressorFunc + compressor CompressorFunc } // NewAlgorithm creates an Algorithm instance. +// nontrivialBaseVariantName is typically "". // This function exists so that Algorithm instances can only be created by code that // is allowed to import this internal subpackage. -func NewAlgorithm(name, mime string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm { +func NewAlgorithm(name, nontrivialBaseVariantName string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm { + baseVariantName := name + if nontrivialBaseVariantName != "" { + baseVariantName = nontrivialBaseVariantName + } return Algorithm{ - name: name, - mime: mime, - prefix: prefix, - decompressor: decompressor, - compressor: compressor, + name: name, + baseVariantName: baseVariantName, + prefix: prefix, + decompressor: decompressor, + compressor: compressor, } } @@ -37,10 +42,11 @@ func (c Algorithm) Name() string { return c.name } -// InternalUnstableUndocumentedMIMEQuestionMark ??? -// DO NOT USE THIS anywhere outside of c/image until it is properly documented. -func (c Algorithm) InternalUnstableUndocumentedMIMEQuestionMark() string { - return c.mime +// BaseVariantName returns the name of the “base variant” of the compression algorithm. +// It is either equal to Name() of the same algorithm, or equal to Name() of some other Algorithm (the “base variant”). +// This supports a single level of “is-a” relationship between compression algorithms, e.g. where "zstd:chunked" data is valid "zstd" data. +func (c Algorithm) BaseVariantName() string { + return c.baseVariantName } // AlgorithmCompressor returns the compressor field of algo. diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/fulcio/fulcio.go b/vendor/github.com/containers/image/v5/signature/sigstore/fulcio/fulcio.go index 4ba98b98..955e8760 100644 --- a/vendor/github.com/containers/image/v5/signature/sigstore/fulcio/fulcio.go +++ b/vendor/github.com/containers/image/v5/signature/sigstore/fulcio/fulcio.go @@ -142,9 +142,13 @@ func WithFulcioAndInteractiveOIDC(fulcioURL *url.URL, oidcIssuerURL *url.URL, oi } logrus.Debugf("Starting interactive OIDC authentication for issuer %s", oidcIssuerURL.Redacted()) - // This is intended to match oauthflow.DefaultIDTokenGetter, overriding only input/output + // This is intended to match oauthflow.DefaultIDTokenGetter (incl. 
the update in init()), overriding only input/output + htmlPage, err := oauth.GetInteractiveSuccessHTML(false, 10) + if err != nil { + return fmt.Errorf("formatting HTML content: %w", err) + } tokenGetter := &oauthflow.InteractiveIDTokenGetter{ - HTMLPage: oauth.InteractiveSuccessHTML, + HTMLPage: htmlPage, Input: interactiveInput, Output: interactiveOutput, } diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go index 53569793..c06d6c09 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_dest.go +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -16,7 +16,6 @@ import ( "sync/atomic" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/imagedestination/stubs" "github.com/containers/image/v5/internal/private" @@ -55,41 +54,61 @@ type storageImageDestination struct { stubs.ImplementsPutBlobPartial stubs.AlwaysSupportsSignatures - imageRef storageReference - directory string // Temporary directory where we store blobs until Commit() time - nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs - manifest []byte // Manifest contents, temporary - manifestDigest digest.Digest // Valid if len(manifest) != 0 - signatures []byte // Signature contents, temporary - signatureses map[digest.Digest][]byte // Instance signature contents, temporary - SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice - SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice + imageRef storageReference + directory string // Temporary directory where we store blobs until Commit() time + nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs + manifest []byte // Manifest contents, temporary + manifestDigest digest.Digest // Valid if len(manifest) != 0 + untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs, valid if not nil + signatures []byte // Signature contents, temporary + signatureses map[digest.Digest][]byte // Instance signature contents, temporary + metadata storageImageMetadata // Metadata contents being built - // A storage destination may be used concurrently. Accesses are - // serialized via a mutex. Please refer to the individual comments - // below for details. - lock sync.Mutex // Mapping from layer (by index) to the associated ID in the storage. // It's protected *implicitly* since `commitLayer()`, at any given // time, can only be executed by *one* goroutine. Please refer to // `queueOrCommit()` for further details on how the single-caller // guarantee is implemented. - indexToStorageID map[int]*string - // All accesses to below data are protected by `lock` which is made - // *explicit* in the code. - uncompressedOrTocDigest map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs or TOC IDs. 
- fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes - filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them - currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed) - indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image - blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer - diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output + indexToStorageID map[int]string + + // A storage destination may be used concurrently, due to HasThreadSafePutBlob. + lock sync.Mutex // Protects lockProtected + lockProtected storageImageDestinationLockProtected +} + +// storageImageDestinationLockProtected contains storageImageDestination data which might be +// accessed concurrently, due to HasThreadSafePutBlob. +// _During the concurrent TryReusingBlob/PutBlob/* calls_ (but not necessarily during the final Commit) +// users must hold storageImageDestination.lock. +type storageImageDestinationLockProtected struct { + currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed) + indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image + + // In general, a layer is identified either by (compressed) digest, or by TOC digest. + // When creating a layer, the c/storage layer metadata and image IDs must _only_ be based on trusted values + // we have computed ourselves. (Layer reuse can then look up against such trusted values, but it might not + // recompute those values for incoming layers — the point of the reuse is that we don’t need to consume the incoming layer.) + + // Layer identification: For a layer, at least one of indexToTOCDigest and blobDiffIDs must be available before commitLayer is called. + // The presence of an indexToTOCDigest is what decides how the layer is identified, i.e. which fields must be trusted. + blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs + indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest, IFF the layer was created/found/reused by TOC digest + + // Layer data: Before commitLayer is called, either at least one of (diffOutputs, blobAdditionalLayer, filenames) + // should be available; or indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer. + // They are looked up in the order they are mentioned above. + diffOutputs map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to a partially-pulled layer’s intermediate data + blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer + // Mapping from layer blobsums to names of files we used to hold them. If set, fileSizes and blobDiffIDs must also be set. + filenames map[digest.Digest]string + // Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set. + fileSizes map[digest.Digest]int64 } // addedLayerInfo records data about a layer to use in this image. type addedLayerInfo struct { - digest digest.Digest - emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems.
false if the manifest type does not have the concept. + digest digest.Digest // Mandatory, the digest of the layer. + emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept. } // newImageDestination sets us up to write a new image, caching blobs in a temporary directory until @@ -117,18 +136,23 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (* HasThreadSafePutBlob: true, }), - imageRef: imageRef, - directory: directory, - signatureses: make(map[digest.Digest][]byte), - uncompressedOrTocDigest: make(map[digest.Digest]digest.Digest), - blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer), - fileSizes: make(map[digest.Digest]int64), - filenames: make(map[digest.Digest]string), - SignatureSizes: []int{}, - SignaturesSizes: make(map[digest.Digest][]int), - indexToStorageID: make(map[int]*string), - indexToAddedLayerInfo: make(map[int]addedLayerInfo), - diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput), + imageRef: imageRef, + directory: directory, + signatureses: make(map[digest.Digest][]byte), + metadata: storageImageMetadata{ + SignatureSizes: []int{}, + SignaturesSizes: make(map[digest.Digest][]int), + }, + indexToStorageID: make(map[int]string), + lockProtected: storageImageDestinationLockProtected{ + indexToAddedLayerInfo: make(map[int]addedLayerInfo), + blobDiffIDs: make(map[digest.Digest]digest.Digest), + indexToTOCDigest: make(map[int]digest.Digest), + diffOutputs: make(map[int]*graphdriver.DriverWithDifferOutput), + blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer), + filenames: make(map[digest.Digest]string), + fileSizes: make(map[digest.Digest]int64), + }, } dest.Compat = impl.AddCompat(dest) return dest, nil @@ -142,12 +166,13 @@ func (s *storageImageDestination) Reference() types.ImageReference { // Close cleans up the temporary directory and additional layer store handlers. func (s *storageImageDestination) Close() error { - for _, al := range s.blobAdditionalLayer { + // This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock. + for _, al := range s.lockProtected.blobAdditionalLayer { al.Release() } - for _, v := range s.diffOutputs { + for _, v := range s.lockProtected.diffOutputs { if v.Target != "" { - _ = s.imageRef.transport.store.CleanupStagingDirectory(v.Target) + _ = s.imageRef.transport.store.CleanupStagedLayer(v) } } return os.RemoveAll(s.directory) @@ -227,9 +252,9 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf // Record information about the blob. s.lock.Lock() - s.uncompressedOrTocDigest[blobDigest] = diffID.Digest() - s.fileSizes[blobDigest] = counter.Count - s.filenames[blobDigest] = filename + s.lockProtected.blobDiffIDs[blobDigest] = diffID.Digest() + s.lockProtected.fileSizes[blobDigest] = counter.Count + s.lockProtected.filenames[blobDigest] = filename s.lock.Unlock() // This is safe because we have just computed diffID, and blobDigest was either computed // by us, or validated by the caller (usually copy.digestingReader). @@ -269,7 +294,7 @@ func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.Read // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. 
-func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { +func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { fetcher := zstdFetcher{ chunkAccessor: chunkAccessor, ctx: ctx, @@ -286,13 +311,25 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces return private.UploadedBlob{}, err } + if out.TOCDigest == "" && out.UncompressedDigest == "" { + return private.UploadedBlob{}, errors.New("internal error: ApplyDiffWithDiffer succeeded with neither TOCDigest nor UncompressedDigest set") + } + blobDigest := srcInfo.Digest s.lock.Lock() - s.uncompressedOrTocDigest[blobDigest] = blobDigest - s.fileSizes[blobDigest] = 0 - s.filenames[blobDigest] = "" - s.diffOutputs[blobDigest] = out + if out.UncompressedDigest != "" { + // The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is + // responsible for ensuring blobDigest has been validated. + s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest + } else { + // Don’t identify layers by TOC if UncompressedDigest is available. + // - Using UncompressedDigest allows image reuse with non-partially-pulled layers + // - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch. + // That TOC is quite unlikely to match with any other TOC value. + s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest + } + s.lockProtected.diffOutputs[options.LayerIndex] = out s.lock.Unlock() return private.UploadedBlob{ @@ -321,68 +358,79 @@ func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, }) } -// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.uncompressedOrTocDigest and other metadata. +// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (blobDigest, size or -1), filling s.blobDiffIDs and other metadata. // The caller must arrange the blob to be eventually committed using s.commitLayer(). -func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { +func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { // lock the entire method as it executes fairly quickly s.lock.Lock() defer s.lock.Unlock() if options.SrcRef != nil { // Check if we have the layer in the underlying additional layer store. - aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(digest, options.SrcRef.String()) + aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobDigest, options.SrcRef.String()) if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { - return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, digest, err) + return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobDigest, err) } else if err == nil { - // Record the uncompressed value so that we can use it to calculate layer IDs. 
- s.uncompressedOrTocDigest[digest] = aLayer.UncompressedDigest() - s.blobAdditionalLayer[digest] = aLayer + s.lockProtected.blobDiffIDs[blobDigest] = aLayer.UncompressedDigest() + s.lockProtected.blobAdditionalLayer[blobDigest] = aLayer return true, private.ReusedBlob{ - Digest: digest, + Digest: blobDigest, Size: aLayer.CompressedSize(), }, nil } } - if digest == "" { + if blobDigest == "" { return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`) } - if err := digest.Validate(); err != nil { + if err := blobDigest.Validate(); err != nil { return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err) } + if options.TOCDigest != "" { + if err := options.TOCDigest.Validate(); err != nil { + return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err) + } + } + + // Check if we have a wasn't-compressed layer in storage that's based on that blob. // Check if we've already cached it in a file. - if size, ok := s.fileSizes[digest]; ok { + if size, ok := s.lockProtected.fileSizes[blobDigest]; ok { + // s.lockProtected.blobDiffIDs is set either by putBlobToPendingFile or in createNewLayer when creating the + // filenames/fileSizes entry. return true, private.ReusedBlob{ - Digest: digest, + Digest: blobDigest, Size: size, }, nil } - // Check if we have a wasn't-compressed layer in storage that's based on that blob. - layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(digest) + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobDigest) if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { - return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, digest, err) + return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, blobDigest, err) } if len(layers) > 0 { - // Save this for completeness. - s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest + s.lockProtected.blobDiffIDs[blobDigest] = blobDigest return true, private.ReusedBlob{ - Digest: digest, + Digest: blobDigest, Size: layers[0].UncompressedSize, }, nil } // Check if we have a was-compressed layer in storage that's based on that blob. - layers, err = s.imageRef.transport.store.LayersByCompressedDigest(digest) + layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobDigest) if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { - return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, digest, err) + return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, blobDigest, err) } if len(layers) > 0 { - // Record the uncompressed value so that we can use it to calculate layer IDs. - s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest + // LayersByCompressedDigest only finds layers which were created from a full layer blob, and extracting that + // always sets UncompressedDigest. 
+ diffID := layers[0].UncompressedDigest + if diffID == "" { + return false, private.ReusedBlob{}, fmt.Errorf("internal error: compressed layer %q (for compressed digest %q) does not have an uncompressed digest", layers[0].ID, blobDigest.String()) + } + s.lockProtected.blobDiffIDs[blobDigest] = diffID return true, private.ReusedBlob{ - Digest: digest, + Digest: blobDigest, Size: layers[0].CompressedSize, }, nil } @@ -391,23 +439,23 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size. if options.CanSubstitute || size != -1 { - if uncompressedDigest := options.Cache.UncompressedDigest(digest); uncompressedDigest != "" && uncompressedDigest != digest { + if uncompressedDigest := options.Cache.UncompressedDigest(blobDigest); uncompressedDigest != "" && uncompressedDigest != blobDigest { layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err) } if len(layers) > 0 { if size != -1 { - s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest + s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest return true, private.ReusedBlob{ - Digest: digest, + Digest: blobDigest, Size: size, }, nil } if !options.CanSubstitute { - return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", digest) + return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", blobDigest) } - s.uncompressedOrTocDigest[uncompressedDigest] = layers[0].UncompressedDigest + s.lockProtected.blobDiffIDs[uncompressedDigest] = uncompressedDigest return true, private.ReusedBlob{ Digest: uncompressedDigest, Size: layers[0].UncompressedSize, @@ -416,23 +464,30 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, } } - tocDigest := digest - if options.TOCDigest != nil { - tocDigest = *options.TOCDigest - } + if options.TOCDigest != "" && options.LayerIndex != nil { + // Check if we have a chunked layer in storage with the same TOC digest. + layers, err := s.imageRef.transport.store.LayersByTOCDigest(options.TOCDigest) - // Check if we have a chunked layer in storage with the same TOC digest. - layers, err = s.imageRef.transport.store.LayersByTOCDigest(tocDigest) - if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { - return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, tocDigest, err) - } - if len(layers) > 0 { - // Save this for completeness. 
- s.uncompressedOrTocDigest[digest] = layers[0].TOCDigest - return true, private.ReusedBlob{ - Digest: layers[0].TOCDigest, - Size: layers[0].UncompressedSize, - }, nil + if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { + return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, options.TOCDigest, err) + } + if len(layers) > 0 { + if size != -1 { + s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest + return true, private.ReusedBlob{ + Digest: blobDigest, + Size: size, + MatchedByTOCDigest: true, + }, nil + } else if options.CanSubstitute && layers[0].UncompressedDigest != "" { + s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest + return true, private.ReusedBlob{ + Digest: layers[0].UncompressedDigest, + Size: layers[0].UncompressedSize, + MatchedByTOCDigest: true, + }, nil + } + } } // Nope, we don't have it. @@ -444,6 +499,8 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, // that since we don't have a recommendation, a random ID should be used if one needs // to be allocated. func (s *storageImageDestination) computeID(m manifest.Manifest) string { + // This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock. + // Build the diffID list. We need the decompressed sums that we've been calculating to // fill in the DiffIDs. It's expected (but not enforced by us) that the number of // diffIDs corresponds to the number of non-EmptyLayer entries in the history. @@ -457,28 +514,59 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string { continue } blobSum := m.FSLayers[i].BlobSum - diffID, ok := s.uncompressedOrTocDigest[blobSum] + diffID, ok := s.lockProtected.blobDiffIDs[blobSum] if !ok { + // this can, in principle, legitimately happen when a layer is reused by TOC. logrus.Infof("error looking up diffID for layer %q", blobSum.String()) return "" } diffIDs = append([]digest.Digest{diffID}, diffIDs...) } - case *manifest.Schema2: + case *manifest.Schema2, *manifest.OCI1: // We know the ID calculation doesn't actually use the diffIDs, so we don't need to populate // the diffID list. - case *manifest.OCI1: - for _, l := range m.Layers { - diffIDs = append(diffIDs, l.Digest) - } default: return "" } - id, err := m.ImageID(diffIDs) + + // We want to use the same ID for “the same” images, but without risking unwanted sharing / malicious image corruption. + // + // Traditionally that means the same ~config digest, as computed by m.ImageID; + // but if we pull a layer by TOC, we verify the layer against neither the (compressed) blob digest in the manifest, + // nor against the config’s RootFS.DiffIDs. We don’t really want to do either, to allow partial layer pulls where we never see + // most of the data. + // + // So, if a layer is pulled by TOC (and we do validate against the TOC), the fact that we used the TOC, and the value of the TOC, + // must enter into the image ID computation. + // But for images where no TOC was used, continue to use IDs computed the traditional way, to maximize image reuse on upgrades, + // and to introduce the changed behavior only when partial pulls are used. + // + // Note that it’s not 100% guaranteed that an image pulled by TOC uses an OCI manifest; consider + // (skopeo copy --format v2s2 docker://…/zstd-chunked-image containers-storage:… ). So this is not happening only in the OCI case above. 
+ ordinaryImageID, err := m.ImageID(diffIDs) if err != nil { return "" } - return id + tocIDInput := "" + hasLayerPulledByTOC := false + for i := range m.LayerInfos() { + layerValue := "" // An empty string is not a valid digest, so this is unambiguous with the TOC case. + tocDigest, ok := s.lockProtected.indexToTOCDigest[i] // "" if not a TOC + if ok { + hasLayerPulledByTOC = true + layerValue = tocDigest.String() + } + tocIDInput += layerValue + "|" // "|" can not be present in a TOC digest, so this is an unambiguous separator. + } + + if !hasLayerPulledByTOC { + return ordinaryImageID + } + // ordinaryImageID is a digest of a config, which is a JSON value. + // To avoid the risk of collisions, start the input with @ so that the input is not a valid JSON. + tocImageID := digest.FromString("@With TOC:" + tocIDInput).Hex() + logrus.Debugf("Ordinary storage image ID %s; a layer was looked up by TOC, so using image ID %s", ordinaryImageID, tocImageID) + return tocImageID } // getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig @@ -491,7 +579,7 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er return nil, fmt.Errorf("invalid digest supplied when reading blob: %w", err) } // Assume it's a file, since we're only calling this from a place that expects to read files. - if filename, ok := s.filenames[info.Digest]; ok { + if filename, ok := s.lockProtected.filenames[info.Digest]; ok { contents, err2 := os.ReadFile(filename) if err2 != nil { return nil, fmt.Errorf(`reading blob from file %q: %w`, filename, err2) } @@ -525,17 +613,17 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo) // caller is the "worker" routine committing layers. All other routines // can continue pulling and queuing in layers. s.lock.Lock() - s.indexToAddedLayerInfo[index] = info + s.lockProtected.indexToAddedLayerInfo[index] = info // We're still waiting for at least one previous/parent layer to be // committed, so there's nothing to do. - if index != s.currentIndex { + if index != s.lockProtected.currentIndex { s.lock.Unlock() return nil } for { - info, ok := s.indexToAddedLayerInfo[index] + info, ok := s.lockProtected.indexToAddedLayerInfo[index] if !ok { break } @@ -550,25 +638,30 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo) // Set the index at the very end to make sure that only one routine // enters stage 2). - s.currentIndex = index + s.lockProtected.currentIndex = index s.lock.Unlock() return nil } -// getDiffIDOrTOCDigest returns the diffID for the specified digest or the digest for the TOC, if known. -func (s *storageImageDestination) getDiffIDOrTOCDigest(uncompressedDigest digest.Digest) (digest.Digest, bool) { +// singleLayerIDComponent returns a single layer’s input to computing a layer (chain) ID, +// and an indication whether the input already has the shape of a layer ID. +// It returns ("", false) if the layer is not found at all (which should never happen). +func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDigest digest.Digest) (string, bool) { s.lock.Lock() defer s.lock.Unlock() - if d, found := s.diffOutputs[uncompressedDigest]; found { - return d.TOCDigest, found + if d, found := s.lockProtected.indexToTOCDigest[layerIndex]; found { + return "@TOC=" + d.Hex(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
} - d, found := s.uncompressedOrTocDigest[uncompressedDigest] - return d, found + + if d, found := s.lockProtected.blobDiffIDs[blobDigest]; found { + return d.Hex(), true // This looks like chain IDs, and it uses the traditional value. + } + return "", false } // commitLayer commits the specified layer with the given index to the storage. -// size can usually be -1; it can be provided if the layer is not known to be already present in uncompressedOrTocDigest. +// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs. // // If the layer cannot be committed yet, the function returns (true, nil). // @@ -586,29 +679,38 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si // Start with an empty string or the previous layer ID. Note that // `s.indexToStorageID` can only be accessed by *one* goroutine at any // given time. Hence, we don't need to lock accesses. - var lastLayer string - if prev := s.indexToStorageID[index-1]; prev != nil { - lastLayer = *prev + var parentLayer string + if index != 0 { + prev, ok := s.indexToStorageID[index-1] + if !ok { + return false, fmt.Errorf("Internal error: commitLayer called with previous layer %d not committed yet", index-1) + } + parentLayer = prev } // Carry over the previous ID for empty non-base layers. if info.emptyLayer { - s.indexToStorageID[index] = &lastLayer + s.indexToStorageID[index] = parentLayer return false, nil } // Check if there's already a layer with the ID that we'd give to the result of applying // this layer blob to its parent, if it has one, or the blob's hex value otherwise. - // The diffIDOrTOCDigest refers either to the DiffID or the digest of the TOC. - diffIDOrTOCDigest, haveDiffIDOrTOCDigest := s.getDiffIDOrTOCDigest(info.digest) - if !haveDiffIDOrTOCDigest { - // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), - // or to even check if we had it. - // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller + // The layerID refers either to the DiffID or the digest of the TOC. + layerIDComponent, layerIDComponentStandalone := s.singleLayerIDComponent(index, info.digest) + if layerIDComponent == "" { + // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob() / TryReusingBlob() / … + // + // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache: a caller // that relies on using a blob digest that has never been seen by the store had better call // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only // so far we are going to accommodate that (if we should be doing that at all). - logrus.Debugf("looking for diffID or TOC digest for blob %+v", info.digest) + // + // We are also ignoring lookups by TOC, and other non-trivial situations. + // Those can only happen using the c/image/internal/private API, + // so those internal callers should be fixed to follow the API instead of expanding this fallback. + logrus.Debugf("looking for diffID for blob=%+v", info.digest) + // Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit. 
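// Editor's illustrative sketch (not part of the vendored patch): how the
// components returned by singleLayerIDComponent combine into storage layer
// IDs in commitLayer below. A standalone DiffID hex is used as-is for a base
// layer; in every other case (a parent exists, or the component is the
// non-standalone "@TOC=..." form) the parent ID and the component are hashed
// together with the canonical (sha256) algorithm.
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func chainedLayerID(parentID, component string, standalone bool) string {
	if standalone && parentID == "" {
		return component // Traditional base-layer ID: the DiffID hex itself.
	}
	return digest.Canonical.FromString(parentID + "+" + component).Hex()
}

func main() {
	base := chainedLayerID("", "aaaa", true)          // DiffID-based base layer
	child := chainedLayerID(base, "@TOC=bbbb", false) // TOC-based child layer
	fmt.Println(base, child)
}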
has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{ Cache: none.NoCache, @@ -618,115 +720,140 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si return false, fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err) } if !has { - return false, fmt.Errorf("error determining uncompressed digest or TOC digest for blob %q", info.digest.String()) + return false, fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String()) } - diffIDOrTOCDigest, haveDiffIDOrTOCDigest = s.getDiffIDOrTOCDigest(info.digest) - if !haveDiffIDOrTOCDigest { - return false, fmt.Errorf("we have blob %q, but don't know its uncompressed or TOC digest", info.digest.String()) + + layerIDComponent, layerIDComponentStandalone = s.singleLayerIDComponent(index, info.digest) + if layerIDComponent == "" { + return false, fmt.Errorf("we have blob %q, but don't know its layer ID", info.digest.String()) } } - id := diffIDOrTOCDigest.Hex() - if lastLayer != "" { - id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffIDOrTOCDigest.Hex())).Hex() + + id := layerIDComponent + if !layerIDComponentStandalone || parentLayer != "" { + id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Hex() } if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { // There's already a layer that should have the right contents, just reuse it. - lastLayer = layer.ID - s.indexToStorageID[index] = &lastLayer + s.indexToStorageID[index] = layer.ID return false, nil } + layer, err := s.createNewLayer(index, info.digest, parentLayer, id) + if err != nil { + return false, err + } + if layer == nil { + return true, nil + } + s.indexToStorageID[index] = layer.ID + return false, nil +} + +// createNewLayer creates a new layer newLayerID for (index, layerDigest) on top of parentLayer (which may be ""). +// If the layer cannot be committed yet, the function returns (nil, nil). +func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.Digest, parentLayer, newLayerID string) (*storage.Layer, error) { s.lock.Lock() - diffOutput, ok := s.diffOutputs[info.digest] + diffOutput, ok := s.lockProtected.diffOutputs[index] s.lock.Unlock() if ok { - if s.manifest == nil { - logrus.Debugf("Skipping commit for TOC=%q, manifest not yet available", id) - return true, nil + var untrustedUncompressedDigest digest.Digest + if diffOutput.UncompressedDigest == "" { + d, err := s.untrustedLayerDiffID(index) + if err != nil { + return nil, err + } + if d == "" { + logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID) + return nil, nil + } + untrustedUncompressedDigest = d } - man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest)) - if err != nil { - return false, fmt.Errorf("parsing manifest: %w", err) - } - - cb, err := s.getConfigBlob(man.ConfigInfo()) - if err != nil { - return false, err - } - - // retrieve the expected uncompressed digest from the config blob. - configOCI := &imgspecv1.Image{} - if err := json.Unmarshal(cb, configOCI); err != nil { - return false, err - } - if index >= len(configOCI.RootFS.DiffIDs) { - return false, fmt.Errorf("index %d out of range for configOCI.RootFS.DiffIDs", index) - } - - layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil) - if err != nil { - return false, err - } - - // let the storage layer know what was the original uncompressed layer. 
 	flags := make(map[string]interface{})
-	flags[expectedLayerDiffIDFlag] = configOCI.RootFS.DiffIDs[index]
-	logrus.Debugf("Setting uncompressed digest to %q for layer %q", configOCI.RootFS.DiffIDs[index], id)
-
-	options := &graphdriver.ApplyDiffWithDifferOpts{
-		Flags: flags,
+	if untrustedUncompressedDigest != "" {
+		flags[expectedLayerDiffIDFlag] = untrustedUncompressedDigest
+		logrus.Debugf("Setting uncompressed digest to %q for layer %q", untrustedUncompressedDigest, newLayerID)
 	}
-	if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, options); err != nil {
-		_ = s.imageRef.transport.store.Delete(layer.ID)
-		return false, err
-	}
+	args := storage.ApplyStagedLayerOptions{
+		ID:          newLayerID,
+		ParentLayer: parentLayer,
-	s.indexToStorageID[index] = &layer.ID
-	return false, nil
+		DiffOutput: diffOutput,
+		DiffOptions: &graphdriver.ApplyDiffWithDifferOpts{
+			Flags: flags,
+		},
+	}
+	layer, err := s.imageRef.transport.store.ApplyStagedLayer(args)
+	if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
+		return nil, fmt.Errorf("failed to put layer using a partial pull: %w", err)
+	}
+	return layer, nil
 }
 
 	s.lock.Lock()
-	al, ok := s.blobAdditionalLayer[info.digest]
+	al, ok := s.lockProtected.blobAdditionalLayer[layerDigest]
 	s.lock.Unlock()
 	if ok {
-		layer, err := al.PutAs(id, lastLayer, nil)
+		layer, err := al.PutAs(newLayerID, parentLayer, nil)
 		if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
-			return false, fmt.Errorf("failed to put layer from digest and labels: %w", err)
+			return nil, fmt.Errorf("failed to put layer from digest and labels: %w", err)
 		}
-		lastLayer = layer.ID
-		s.indexToStorageID[index] = &lastLayer
-		return false, nil
+		return layer, nil
 	}
 
 	// Check if we previously cached a file with that blob's contents. If we didn't,
 	// then we need to read the desired contents from a layer.
+	var trustedUncompressedDigest, trustedOriginalDigest digest.Digest // For storage.LayerOptions
 	s.lock.Lock()
-	filename, ok := s.filenames[info.digest]
+	tocDigest := s.lockProtected.indexToTOCDigest[index]       // "" if not set
+	optionalDiffID := s.lockProtected.blobDiffIDs[layerDigest] // "" if not set
+	filename, gotFilename := s.lockProtected.filenames[layerDigest]
 	s.lock.Unlock()
-	if !ok {
-		// Try to find the layer with contents matching that blobsum.
-		layer := ""
-		layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffIDOrTOCDigest)
-		if err2 == nil && len(layers) > 0 {
-			layer = layers[0].ID
-		} else {
-			layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(info.digest)
+	if gotFilename && tocDigest == "" {
+		// If tocDigest != "" and we now happen to find a layerDigest match, newLayerID has already been computed as TOC-based,
+		// and we don't know the relationship of the layerDigest and TOC digest.
+		// We could recompute newLayerID to be DiffID-based and use the file, but such a within-image layer
+		// reuse is expected to be pretty rare; instead, ignore the unexpected file match and proceed to the
+		// originally-planned TOC match.
+
+		// Because tocDigest == "", optionalDiffID must have been set; and even if it weren’t, PutLayer will recompute the digest from the stream.
+		trustedUncompressedDigest = optionalDiffID
+		trustedOriginalDigest = layerDigest // The code setting .filenames[layerDigest] is responsible for the contents matching.
+	} else {
+		// Try to find the layer with contents matching the data we use.
+		var layer *storage.Layer // = nil
+		if tocDigest != "" {
+			layers, err2 := s.imageRef.transport.store.LayersByTOCDigest(tocDigest)
+			if err2 == nil && len(layers) > 0 {
+				layer = &layers[0]
+			} else {
+				return nil, fmt.Errorf("locating layer for TOC digest %q: %w", tocDigest, err2)
+			}
+		} else {
+			// Because tocDigest == "", optionalDiffID must have been set.
+			layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(optionalDiffID)
+			if err2 == nil && len(layers) > 0 {
+				layer = &layers[0]
+			} else {
+				layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(layerDigest)
+				if err2 == nil && len(layers) > 0 {
+					layer = &layers[0]
+				}
+			}
+			if layer == nil {
+				return nil, fmt.Errorf("locating layer for blob %q: %w", layerDigest, err2)
-			}
-			if layer == "" {
-				return false, fmt.Errorf("locating layer for blob %q: %w", info.digest, err2)
 			}
 		}
 		// Read the layer's contents.
 		noCompression := archive.Uncompressed
 		diffOptions := &storage.DiffOptions{
 			Compression: &noCompression,
 		}
-		diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
+		diff, err2 := s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
 		if err2 != nil {
-			return false, fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2)
+			return nil, fmt.Errorf("reading layer %q for blob %q: %w", layer.ID, layerDigest, err2)
 		}
 		// Copy the layer diff to a file. Diff() takes a lock that it holds
 		// until the ReadCloser that it returns is closed, and PutLayer() wants
@@ -736,41 +863,112 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
 		file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0o600)
 		if err != nil {
 			diff.Close()
-			return false, fmt.Errorf("creating temporary file %q: %w", filename, err)
+			return nil, fmt.Errorf("creating temporary file %q: %w", filename, err)
 		}
 		// Copy the data to the file.
 		// TODO: This can take quite some time, and should ideally be cancellable using
 		// ctx.Done().
-		_, err = io.Copy(file, diff)
+		fileSize, err := io.Copy(file, diff)
 		diff.Close()
 		file.Close()
 		if err != nil {
-			return false, fmt.Errorf("storing blob to file %q: %w", filename, err)
+			return nil, fmt.Errorf("storing blob to file %q: %w", filename, err)
+		}
+
+		if optionalDiffID == "" && layer.UncompressedDigest != "" {
+			optionalDiffID = layer.UncompressedDigest
+		}
+		// The stream we have is uncompressed; this digest matches the contents of the stream.
+		// If tocDigest != "", trustedUncompressedDigest might still be ""; in that case PutLayer will compute the value from the stream.
+		trustedUncompressedDigest = optionalDiffID
+		// FIXME? trustedOriginalDigest could be set to layerDigest IF tocDigest == "" (otherwise layerDigest is untrusted).
+		// But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created
+		// layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream).
+		//
+		// We can legitimately set storage.LayerOptions.OriginalDigest to "",
+		// but that would just result in PutLayer computing the digest of the input stream == optionalDiffID.
+		// So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
+		trustedOriginalDigest = optionalDiffID
+
+		// Allow using the already-collected layer contents without extracting the layer again.
+		//
+		// This only matches against the uncompressed digest.
+		// We don’t have the original compressed data here to trivially set filenames[layerDigest].
+		// In particular we can’t achieve the correct Layer.CompressedSize value with the current c/storage API.
+		// Within-image layer reuse is probably very rare; for now we prefer to avoid that complexity.
+		if trustedUncompressedDigest != "" {
+			s.lock.Lock()
+			s.lockProtected.blobDiffIDs[trustedUncompressedDigest] = trustedUncompressedDigest
+			s.lockProtected.filenames[trustedUncompressedDigest] = filename
+			s.lockProtected.fileSizes[trustedUncompressedDigest] = fileSize
+			s.lock.Unlock()
+		}
-		// Make sure that we can find this file later, should we need the layer's
-		// contents again.
-		s.lock.Lock()
-		s.filenames[info.digest] = filename
-		s.lock.Unlock()
 	}
 	// Read the cached blob and use it as a diff.
 	file, err := os.Open(filename)
 	if err != nil {
-		return false, fmt.Errorf("opening file %q: %w", filename, err)
+		return nil, fmt.Errorf("opening file %q: %w", filename, err)
 	}
 	defer file.Close()
 	// Build the new layer using the diff, regardless of where it came from.
 	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
-	layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{
-		OriginalDigest:     info.digest,
-		UncompressedDigest: diffIDOrTOCDigest,
+	layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{
+		OriginalDigest:     trustedOriginalDigest,
+		UncompressedDigest: trustedUncompressedDigest,
 	}, file)
 	if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
-		return false, fmt.Errorf("adding layer with blob %q: %w", info.digest, err)
+		return nil, fmt.Errorf("adding layer with blob %q: %w", layerDigest, err)
+	}
+	return layer, nil
+}
+
+// untrustedLayerDiffID returns a DiffID value for layerIndex from the image’s config.
+// If the value is not yet available (but it can be available after s.manifest is set), it returns ("", nil).
+// WARNING: We don’t validate the DiffID value against the layer contents; it must not be used for any deduplication.
+func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) {
+	// At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob, and
+	// nothing is writing to s.manifest yet, or PutManifest has been called and s.manifest != nil.
+	// Either way this function does not need the protection of s.lock.
+	if s.manifest == nil {
+		logrus.Debugf("Skipping commit for layer %d, manifest not yet available", layerIndex)
+		return "", nil
 	}
-	s.indexToStorageID[index] = &layer.ID
-	return false, nil
+	if s.untrustedDiffIDValues == nil {
+		mt := manifest.GuessMIMEType(s.manifest)
+		if mt != imgspecv1.MediaTypeImageManifest {
+			// We could, in principle, build an ImageSource, support arbitrary image formats using image.FromUnparsedImage,
+			// and then use types.Image.OCIConfig so that we can parse the image.
+			//
+			// In practice, this should, right now, only matter for pulls of OCI images (this code path implies that a layer has a TOC annotation),
+			// while converting to non-OCI formats using a manual (skopeo copy) or something similar, not (podman pull).
+			// So it is not implemented yet.
+ return "", fmt.Errorf("determining DiffID for manifest type %q is not yet supported", mt) + } + man, err := manifest.FromBlob(s.manifest, mt) + if err != nil { + return "", fmt.Errorf("parsing manifest: %w", err) + } + + cb, err := s.getConfigBlob(man.ConfigInfo()) + if err != nil { + return "", err + } + + // retrieve the expected uncompressed digest from the config blob. + configOCI := &imgspecv1.Image{} + if err := json.Unmarshal(cb, configOCI); err != nil { + return "", err + } + s.untrustedDiffIDValues = slices.Clone(configOCI.RootFS.DiffIDs) + if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory… + s.untrustedDiffIDValues = []digest.Digest{} + } + } + if layerIndex >= len(s.untrustedDiffIDValues) { + return "", fmt.Errorf("image config has only %d DiffID values, but a layer with index %d exists", len(s.untrustedDiffIDValues), layerIndex) + } + return s.untrustedDiffIDValues[layerIndex], nil } // Commit marks the process of storing the image as successful and asks for the image to be persisted. @@ -781,6 +979,8 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + // This function is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock. + if len(s.manifest) == 0 { return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()") } @@ -827,12 +1027,12 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t } } var lastLayer string - if len(layerBlobs) > 0 { // Can happen when using caches - prev := s.indexToStorageID[len(layerBlobs)-1] - if prev == nil { + if len(layerBlobs) > 0 { // Zero-layer images rarely make sense, but it is technically possible, and may happen for non-image artifacts. + prev, ok := s.indexToStorageID[len(layerBlobs)-1] + if !ok { return fmt.Errorf("Internal error: storageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1) } - lastLayer = *prev + lastLayer = prev } // If one of those blobs was a configuration blob, then we can try to dig out the date when the image @@ -846,14 +1046,14 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t // Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so // we just need to screen out the ones that are actually layers to get the list of non-layers. dataBlobs := set.New[digest.Digest]() - for blob := range s.filenames { + for blob := range s.lockProtected.filenames { dataBlobs.Add(blob) } for _, layerBlob := range layerBlobs { dataBlobs.Delete(layerBlob.Digest) } for _, blob := range dataBlobs.Values() { - v, err := os.ReadFile(s.filenames[blob]) + v, err := os.ReadFile(s.lockProtected.filenames[blob]) if err != nil { return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err) } @@ -906,7 +1106,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t } // Set up to save our metadata. 
- metadata, err := json.Marshal(s) + metadata, err := json.Marshal(s.metadata) if err != nil { return fmt.Errorf("encoding metadata for image: %w", err) } @@ -1011,7 +1211,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s } if instanceDigest == nil { s.signatures = sigblob - s.SignatureSizes = sizes + s.metadata.SignatureSizes = sizes if len(s.manifest) > 0 { manifestDigest := s.manifestDigest instanceDigest = &manifestDigest @@ -1019,7 +1219,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s } if instanceDigest != nil { s.signatureses[*instanceDigest] = sigblob - s.SignaturesSizes[*instanceDigest] = sizes + s.metadata.SignaturesSizes[*instanceDigest] = sizes } return nil } diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go index ac09f3db..eed846eb 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_image.go +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -18,11 +18,6 @@ var ( ErrNoSuchImage = storage.ErrNotAnImage ) -type storageImageCloser struct { - types.ImageCloser - size int64 -} - // manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions. // If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably; // for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey @@ -36,6 +31,17 @@ func signatureBigDataKey(digest digest.Digest) string { return "signature-" + digest.Encoded() } +// storageImageMetadata is stored, as JSON, in storage.Image.Metadata +type storageImageMetadata struct { + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice + SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice +} + +type storageImageCloser struct { + types.ImageCloser + size int64 +} + // Size() returns the previously-computed size of the image, with no error. func (s *storageImageCloser) Size() (int64, error) { return s.size, nil diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go index 7022d322..ead3300f 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_src.go +++ b/vendor/github.com/containers/image/v5/storage/storage_src.go @@ -29,16 +29,6 @@ import ( "github.com/sirupsen/logrus" ) -// getBlobMutexProtected is a struct to hold the state of the getBlobMutex mutex. -type getBlobMutexProtected struct { - // digestToLayerID is a lookup map from the layer digest (either the uncompressed digest or the TOC digest) to the - // layer ID in the store. 
- digestToLayerID map[digest.Digest]string - - // layerPosition stores where we are in reading a blob's layers - layerPosition map[digest.Digest]int -} - type storageImageSource struct { impl.Compat impl.PropertyMethodsInitialize @@ -47,13 +37,25 @@ type storageImageSource struct { imageRef storageReference image *storage.Image systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files - cachedManifest []byte // A cached copy of the manifest, if already known, or nil - getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions (it guards layerPosition and digestToLayerID) + metadata storageImageMetadata + cachedManifest []byte // A cached copy of the manifest, if already known, or nil + getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions getBlobMutexProtected getBlobMutexProtected - SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice - SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice } +// getBlobMutexProtected contains storageImageSource data protected by getBlobMutex. +type getBlobMutexProtected struct { + // digestToLayerID is a lookup map from a possibly-untrusted uncompressed layer digest (as returned by LayerInfosForCopy) to the + // layer ID in the store. + digestToLayerID map[digest.Digest]string + + // layerPosition stores where we are in reading a blob's layers + layerPosition map[digest.Digest]int +} + +// expectedLayerDiffIDFlag is a per-layer flag containing an UNTRUSTED uncompressed digest of the layer. +// It is set when pulling a layer by TOC; later, this value is used with digestToLayerID +// to allow identifying the layer — and the consumer is expected to verify the blob returned by GetBlob against the digest. const expectedLayerDiffIDFlag = "expected-layer-diffid" // newImageSource sets up an image for reading. @@ -71,11 +73,13 @@ func newImageSource(sys *types.SystemContext, imageRef storageReference) (*stora }), NoGetBlobAtInitialize: stubs.NoGetBlobAt(imageRef), - imageRef: imageRef, - systemContext: sys, - image: img, - SignatureSizes: []int{}, - SignaturesSizes: make(map[digest.Digest][]int), + imageRef: imageRef, + systemContext: sys, + image: img, + metadata: storageImageMetadata{ + SignatureSizes: []int{}, + SignaturesSizes: make(map[digest.Digest][]int), + }, getBlobMutexProtected: getBlobMutexProtected{ digestToLayerID: make(map[digest.Digest]string), layerPosition: make(map[digest.Digest]int), @@ -83,7 +87,7 @@ func newImageSource(sys *types.SystemContext, imageRef storageReference) (*stora } image.Compat = impl.AddCompat(image) if img.Metadata != "" { - if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { + if err := json.Unmarshal([]byte(img.Metadata), &image.metadata); err != nil { return nil, fmt.Errorf("decoding metadata for source image: %w", err) } } @@ -118,8 +122,9 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c var layers []storage.Layer - // If the digest was overridden by LayerInfosForCopy, then we need to use the TOC digest - // to retrieve it from the storage. + // This lookup path is strictly necessary for layers identified by TOC digest + // (where LayersByUncompressedDigest might not find our layer); + // for other layers it is an optimization to avoid the cost of the LayersByUncompressedDigest call. 
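// Editor's illustrative sketch (not part of the vendored patch): with the
// metadata split above, storage.Image.Metadata round-trips through the narrow
// storageImageMetadata struct instead of the whole source/destination object.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/opencontainers/go-digest"
)

type storageImageMetadata struct {
	SignatureSizes  []int                   `json:"signature-sizes,omitempty"`
	SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"`
}

func main() {
	m := storageImageMetadata{SignatureSizes: []int{256, 512}}
	blob, _ := json.Marshal(m) // What Commit() now stores in storage.Image.Metadata.
	fmt.Println(string(blob))  // {"signature-sizes":[256,512]}

	var decoded storageImageMetadata
	_ = json.Unmarshal(blob, &decoded) // What newImageSource now decodes.
	fmt.Println(decoded.SignatureSizes)
}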
 	s.getBlobMutex.Lock()
 	layerID, found := s.getBlobMutexProtected.digestToLayerID[digest]
 	s.getBlobMutex.Unlock()
 
@@ -297,29 +302,29 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige
 		if err != nil {
 			return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err)
 		}
-		if layer.UncompressedDigest == "" && layer.TOCDigest == "" {
-			return nil, fmt.Errorf("uncompressed digest and TOC digest for layer %q is unknown", layerID)
-		}
 		if layer.UncompressedSize < 0 {
 			return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID)
 		}
 
 		blobDigest := layer.UncompressedDigest
-
-		if layer.TOCDigest != "" {
+		if blobDigest == "" {
+			if layer.TOCDigest == "" {
+				return nil, fmt.Errorf("uncompressed digest and TOC digest for layer %q are unknown", layerID)
+			}
 			if layer.Flags == nil || layer.Flags[expectedLayerDiffIDFlag] == nil {
 				return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not set", layer.TOCDigest, layerID, expectedLayerDiffIDFlag)
 			}
-			if expectedDigest, ok := layer.Flags[expectedLayerDiffIDFlag].(string); ok {
-				// if the layer is stored by its TOC, report the expected diffID as the layer Digest
-				// but store the TOC digest so we can later retrieve it from the storage.
-				blobDigest, err = digest.Parse(expectedDigest)
-				if err != nil {
-					return nil, fmt.Errorf("parsing expected diffID %q for layer %q: %w", expectedDigest, layerID, err)
-				}
-			} else {
+			expectedDigest, ok := layer.Flags[expectedLayerDiffIDFlag].(string)
+			if !ok {
 				return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not a string", layer.TOCDigest, layerID, expectedLayerDiffIDFlag)
 			}
+			// If the layer is stored by its TOC, report the expected diffID as the layer Digest;
+			// the generic code is responsible for validating the digest.
+			// We can locate the layer without further c/storage help using s.getBlobMutexProtected.digestToLayerID.
+			blobDigest, err = digest.Parse(expectedDigest)
+			if err != nil {
+				return nil, fmt.Errorf("parsing expected diffID %q for layer %q: %w", expectedDigest, layerID, err)
+			}
 		}
 		s.getBlobMutex.Lock()
 		s.getBlobMutexProtected.digestToLayerID[blobDigest] = layer.ID
@@ -375,11 +380,11 @@
 func (s *storageImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
 	var offset int
 	signatureBlobs := []byte{}
-	signatureSizes := s.SignatureSizes
+	signatureSizes := s.metadata.SignatureSizes
 	key := "signatures"
 	instance := "default instance"
 	if instanceDigest != nil {
-		signatureSizes = s.SignaturesSizes[*instanceDigest]
+		signatureSizes = s.metadata.SignaturesSizes[*instanceDigest]
 		key = signatureBigDataKey(*instanceDigest)
 		instance = instanceDigest.Encoded()
 	}
@@ -425,7 +430,7 @@ func (s *storageImageSource) getSize() (int64, error) {
 		sum += bigSize
 	}
 	// Add the signature sizes.
-	for _, sigSize := range s.SignatureSizes {
+	for _, sigSize := range s.metadata.SignatureSizes {
 		sum += int64(sigSize)
 	}
 	// Walk the layer list.
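// Editor's illustrative sketch (not part of the vendored patch):
// GetSignaturesWithFormat above walks one concatenated signature blob using
// the sizes recorded in metadata; the offset arithmetic reduces to this:
package main

import "fmt"

func splitSignatures(blob []byte, sizes []int) ([][]byte, error) {
	sigs := make([][]byte, 0, len(sizes))
	offset := 0
	for _, size := range sizes {
		if size < 0 || offset+size > len(blob) {
			return nil, fmt.Errorf("corrupt signature record: %d+%d exceeds %d bytes", offset, size, len(blob))
		}
		sigs = append(sigs, blob[offset:offset+size])
		offset += size
	}
	return sigs, nil
}

func main() {
	sigs, err := splitSignatures([]byte("aaabb"), []int{3, 2})
	fmt.Println(len(sigs), err) // 2 <nil>
}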
diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml
index 09f4d9e9..13bc20e7 100644
--- a/vendor/github.com/containers/storage/.cirrus.yml
+++ b/vendor/github.com/containers/storage/.cirrus.yml
@@ -23,7 +23,7 @@ env:
     # GCE project where images live
     IMAGE_PROJECT: "libpod-218412"
     # VM Image built in containers/automation_images
-    IMAGE_SUFFIX: "c20231208t193858z-f39f38d13"
+    IMAGE_SUFFIX: "c20240102t155643z-f39f38d13"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
     DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
 
diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile
index 77189d49..8461c090 100644
--- a/vendor/github.com/containers/storage/Makefile
+++ b/vendor/github.com/containers/storage/Makefile
@@ -41,7 +41,7 @@ containers-storage: ## build using gc on the host
 	$(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
 
 codespell:
-	codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L worl,flate,uint,iff,od,ERRO -w
+	codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L plack,worl,flate,uint,iff,od,ERRO -w
 
 binary local-binary: containers-storage
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index a63cb35e..d6b55473 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.52.0
+1.52.1-dev
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index f71ee693..aa99fdea 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -196,6 +196,8 @@ type DriverWithDifferOutput struct {
 	BigData   map[string][]byte
 	TarSplit  []byte
 	TOCDigest digest.Digest
+	// RootDirMode is the mode of the root directory of the layer, if specified.
+	RootDirMode *os.FileMode
 	// Artifacts is a collection of additional artifacts
 	// generated by the differ that the storage driver can use.
 	Artifacts map[string]interface{}
@@ -212,10 +214,26 @@ const (
 	DifferOutputFormatFlat
 )
 
+type DifferFsVerity int
+
+const (
+	// DifferFsVerityDisabled means no fs-verity is used
+	DifferFsVerityDisabled = iota
+
+	// DifferFsVerityEnabled means fs-verity is used when supported
+	DifferFsVerityEnabled
+
+	// DifferFsVerityRequired means fs-verity is required
+	DifferFsVerityRequired
+)
+
 // DifferOptions overrides how the differ works
 type DifferOptions struct {
 	// Format defines the destination directory layout format
 	Format DifferOutputFormat
+
+	// UseFsVerity defines whether fs-verity is used
+	UseFsVerity DifferFsVerity
 }
 
 // Differ defines the interface for using a custom differ.
@@ -231,8 +249,8 @@ type DriverWithDiffer interface {
 	// ApplyDiffWithDiffer applies the changes using the callback function.
 	// If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory.
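// Editor's illustrative sketch (not part of the vendored patch): how a caller
// might fill the extended DifferOptions. DifferOutputFormatDir is assumed to
// be the pre-existing default format constant, and the composefs wiring
// mirrors what overlay's ApplyDiffWithDiffer does later in this patch.
package main

import (
	"fmt"

	graphdriver "github.com/containers/storage/drivers"
)

func differOptionsFor(usingComposefs bool) *graphdriver.DifferOptions {
	opts := &graphdriver.DifferOptions{
		Format: graphdriver.DifferOutputFormatDir,
	}
	if usingComposefs {
		// composefs wants a flat layout and opts in to fs-verity where supported.
		opts.Format = graphdriver.DifferOutputFormatFlat
		opts.UseFsVerity = graphdriver.DifferFsVerityEnabled
	}
	return opts
}

func main() {
	fmt.Println(differOptionsFor(true).UseFsVerity == graphdriver.DifferFsVerityEnabled) // true
}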
ApplyDiffWithDiffer(id, parent string, options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error) - // ApplyDiffFromStagingDirectory applies the changes using the specified staging directory. - ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error + // ApplyDiffFromStagingDirectory applies the changes using the diffOutput target directory. + ApplyDiffFromStagingDirectory(id, parent string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error // CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors CleanupStagingDirectory(stagingDirectory string) error // DifferTarget gets the location where files are stored for the layer. diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs.go b/vendor/github.com/containers/storage/drivers/overlay/composefs.go index ed9287d6..baa9d7be 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/composefs.go +++ b/vendor/github.com/containers/storage/drivers/overlay/composefs.go @@ -7,15 +7,13 @@ import ( "encoding/binary" "errors" "fmt" - "io/fs" "os" "os/exec" "path/filepath" "sync" - "syscall" - "unsafe" "github.com/containers/storage/pkg/chunked/dump" + "github.com/containers/storage/pkg/fsverity" "github.com/containers/storage/pkg/loopback" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" @@ -34,72 +32,6 @@ func getComposeFsHelper() (string, error) { return composeFsHelperPath, composeFsHelperErr } -func enableVerity(description string, fd int) error { - enableArg := unix.FsverityEnableArg{ - Version: 1, - Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256, - Block_size: 4096, - } - - _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg))) - if e1 != 0 && !errors.Is(e1, unix.EEXIST) { - return fmt.Errorf("failed to enable verity for %q: %w", description, e1) - } - return nil -} - -type verityDigest struct { - Fsv unix.FsverityDigest - Buf [64]byte -} - -func measureVerity(description string, fd int) (string, error) { - var digest verityDigest - digest.Fsv.Size = 64 - _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest))) - if e1 != 0 { - return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1) - } - return fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil -} - -func enableVerityRecursive(root string) (map[string]string, error) { - digests := make(map[string]string) - walkFn := func(path string, d fs.DirEntry, err error) error { - if err != nil { - return err - } - if !d.Type().IsRegular() { - return nil - } - - f, err := os.Open(path) - if err != nil { - return err - } - defer f.Close() - - if err := enableVerity(path, int(f.Fd())); err != nil { - return err - } - - verity, err := measureVerity(path, int(f.Fd())) - if err != nil { - return err - } - - relPath, err := filepath.Rel(root, path) - if err != nil { - return err - } - - digests[relPath] = verity - return nil - } - err := filepath.WalkDir(root, walkFn) - return digests, err -} - func getComposefsBlob(dataDir string) string { return filepath.Join(dataDir, "composefs.blob") } @@ -151,7 +83,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com return err } - if err := enableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && 
!errors.Is(err, unix.ENOTTY) { + if err := fsverity.EnableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) { logrus.Warningf("%s", err) } diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 8cc33e15..f007aa94 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -82,7 +82,8 @@ const ( lowerFile = "lower" maxDepth = 500 - tocArtifact = "toc" + tocArtifact = "toc" + fsVerityDigestsArtifact = "fs-verity-digests" // idLength represents the number of random characters // which can be used to create the unique link identifier @@ -295,7 +296,7 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool { // a bunch of network file systems... case graphdriver.FsMagicNfsFs, graphdriver.FsMagicSmbFs, graphdriver.FsMagicAcfs, graphdriver.FsMagicAfs, graphdriver.FsMagicCephFs, graphdriver.FsMagicCIFS, - graphdriver.FsMagicFHGFSFs, graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX, + graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX, graphdriver.FsMagicKAFS, graphdriver.FsMagicLUSTRE, graphdriver.FsMagicNCP, graphdriver.FsMagicNFSD, graphdriver.FsMagicOCFS2, graphdriver.FsMagicPANFS, graphdriver.FsMagicPRLFS, graphdriver.FsMagicSMB2, graphdriver.FsMagicSNFS, @@ -309,16 +310,6 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool { // If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error. // If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned. func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { - // If custom --imagestore is selected never - // ditch the original graphRoot, instead add it as - // additionalImageStore so its images can still be - // read and used. - if options.ImageStore != "" { - graphRootAsAdditionalStore := fmt.Sprintf("AdditionalImageStore=%s", options.ImageStore) - options.DriverOptions = append(options.DriverOptions, graphRootAsAdditionalStore) - // complete base name with driver name included - options.ImageStore = filepath.Join(options.ImageStore, "overlay") - } opts, err := parseOptions(options.DriverOptions) if err != nil { return nil, err @@ -862,22 +853,15 @@ func (d *Driver) Status() [][2]string { // Metadata returns meta data about the overlay driver such as // LowerDir, UpperDir, WorkDir and MergeDir used to store data. func (d *Driver) Metadata(id string) (map[string]string, error) { - dir, imagestore, _ := d.dir2(id) + dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } - workDirBase := dir - if imagestore != "" { - if _, err := os.Stat(dir); err != nil { - return nil, err - } - workDirBase = imagestore - } metadata := map[string]string{ - "WorkDir": path.Join(workDirBase, "work"), - "MergedDir": path.Join(workDirBase, "merged"), - "UpperDir": path.Join(workDirBase, "diff"), + "WorkDir": path.Join(dir, "work"), + "MergedDir": path.Join(dir, "merged"), + "UpperDir": path.Join(dir, "diff"), } lowerDirs, err := d.getLowerDirs(id) @@ -895,7 +879,7 @@ func (d *Driver) Metadata(id string) (map[string]string, error) { // is being shutdown. For now, we just have to unmount the bind mounted // we had created. 
func (d *Driver) Cleanup() error { - _ = os.RemoveAll(d.getStagingDir()) + _ = os.RemoveAll(filepath.Join(d.home, stagingDir)) return mount.Unmount(d.home) } @@ -991,8 +975,10 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr return d.create(id, parent, opts, true) } -func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disableQuota bool) (retErr error) { - dir, imageStore, _ := d.dir2(id) +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) { + dir, homedir, _ := d.dir2(id, readOnly) + + disableQuota := readOnly uidMaps := d.uidMaps gidMaps := d.gidMaps @@ -1003,7 +989,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable } // Make the link directory if it does not exist - if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil { + if err := idtools.MkdirAllAs(path.Join(homedir, linkDir), 0o755, 0, 0); err != nil { return err } @@ -1020,20 +1006,8 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0o755, idPair); err != nil { return err } - workDirBase := dir - if imageStore != "" { - workDirBase = imageStore - if err := idtools.MkdirAllAndChownNew(path.Dir(imageStore), 0o755, idPair); err != nil { - return err - } - } if parent != "" { - parentBase, parentImageStore, inAdditionalStore := d.dir2(parent) - // If parentBase path is additional image store, select the image contained in parentBase. - // See https://github.com/containers/podman/issues/19748 - if parentImageStore != "" && !inAdditionalStore { - parentBase = parentImageStore - } + parentBase := d.dir(parent) st, err := system.Stat(filepath.Join(parentBase, "diff")) if err != nil { return err @@ -1054,11 +1028,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err := idtools.MkdirAllAndChownNew(dir, 0o700, idPair); err != nil { return err } - if imageStore != "" { - if err := idtools.MkdirAllAndChownNew(imageStore, 0o700, idPair); err != nil { - return err - } - } defer func() { // Clean up on failure @@ -1066,11 +1035,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err2 := os.RemoveAll(dir); err2 != nil { logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2) } - if imageStore != "" { - if err2 := os.RemoveAll(workDirBase); err2 != nil { - logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", workDirBase, err2) - } - } } }() @@ -1093,11 +1057,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err := d.quotaCtl.SetQuota(dir, quota); err != nil { return err } - if imageStore != "" { - if err := d.quotaCtl.SetQuota(imageStore, quota); err != nil { - return err - } - } } perms := defaultPerms @@ -1106,12 +1065,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable } if parent != "" { - parentBase, parentImageStore, inAdditionalStore := d.dir2(parent) - // If parentBase path is additional image store, select the image contained in parentBase. 
- // See https://github.com/containers/podman/issues/19748 - if parentImageStore != "" && !inAdditionalStore { - parentBase = parentImageStore - } + parentBase := d.dir(parent) st, err := system.Stat(filepath.Join(parentBase, "diff")) if err != nil { return err @@ -1119,17 +1073,14 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable perms = os.FileMode(st.Mode()) } - if err := idtools.MkdirAs(path.Join(workDirBase, "diff"), perms, rootUID, rootGID); err != nil { + if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil { return err } lid := generateID(idLength) linkBase := path.Join("..", id, "diff") - if imageStore != "" { - linkBase = path.Join(imageStore, "diff") - } - if err := os.Symlink(linkBase, path.Join(d.home, linkDir, lid)); err != nil { + if err := os.Symlink(linkBase, path.Join(homedir, linkDir, lid)); err != nil { return err } @@ -1138,10 +1089,10 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable return err } - if err := idtools.MkdirAs(path.Join(workDirBase, "work"), 0o700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAs(path.Join(dir, "work"), 0o700, rootUID, rootGID); err != nil { return err } - if err := idtools.MkdirAs(path.Join(workDirBase, "merged"), 0o700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, rootUID, rootGID); err != nil { return err } @@ -1223,26 +1174,39 @@ func (d *Driver) getLower(parent string) (string, error) { } func (d *Driver) dir(id string) string { - p, _, _ := d.dir2(id) + p, _, _ := d.dir2(id, false) return p } -func (d *Driver) dir2(id string) (string, string, bool) { - newpath := path.Join(d.home, id) - imageStore := "" +func (d *Driver) getAllImageStores() []string { + additionalImageStores := d.AdditionalImageStores() if d.imageStore != "" { - imageStore = path.Join(d.imageStore, id) + additionalImageStores = append([]string{d.imageStore}, additionalImageStores...) 
} + return additionalImageStores +} + +func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) { + var homedir string + + if useImageStore && d.imageStore != "" { + homedir = path.Join(d.imageStore, d.name) + } else { + homedir = d.home + } + + newpath := path.Join(homedir, id) + if _, err := os.Stat(newpath); err != nil { - for _, p := range d.AdditionalImageStores() { + for _, p := range d.getAllImageStores() { l := path.Join(p, d.name, id) _, err = os.Stat(l) if err == nil { - return l, imageStore, true + return l, homedir, true } } } - return newpath, imageStore, false + return newpath, homedir, false } func (d *Driver) getLowerDirs(id string) ([]string, error) { @@ -1452,14 +1416,11 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { } func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) { - dir, imageStore, inAdditionalStore := d.dir2(id) + dir, _, inAdditionalStore := d.dir2(id, false) if _, err := os.Stat(dir); err != nil { return "", err } - workDirBase := dir - if imageStore != "" { - workDirBase = imageStore - } + readWrite := !inAdditionalStore if !d.SupportsShifting() || options.DisableShifting { @@ -1564,7 +1525,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO }() composeFsLayers := []string{} - composeFsLayersDir := filepath.Join(workDirBase, "composefs-layers") + composeFsLayersDir := filepath.Join(dir, "composefs-layers") maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) { composefsBlob := d.getComposefsData(lowerID) _, err = os.Stat(composefsBlob) @@ -1598,7 +1559,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO return dest, nil } - diffDir := path.Join(workDirBase, "diff") + diffDir := path.Join(dir, "diff") if dest, err := maybeAddComposefsMount(id, 0, readWrite); err != nil { return "", err @@ -1616,7 +1577,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO lower := "" newpath := path.Join(d.home, l) if st, err := os.Stat(newpath); err != nil { - for _, p := range d.AdditionalImageStores() { + for _, p := range d.getAllImageStores() { lower = path.Join(p, d.name, l) if st2, err2 := os.Stat(lower); err2 == nil { if !permsKnown { @@ -1684,21 +1645,27 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO optsList = append(optsList, "metacopy=on", "redirect_dir=on") } - if len(absLowers) == 0 { - absLowers = append(absLowers, path.Join(dir, "empty")) - } - // user namespace requires this to move a directory from lower to upper. 
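// Editor's illustrative sketch (not part of the vendored patch): the lookup
// order implemented by dir2/getAllImageStores above, reduced to plain path
// probing. All paths and names here are hypothetical.
package main

import (
	"fmt"
	"os"
	"path"
)

func layerDir(home, imageStore, driverName, id string, useImageStore bool, additionalStores []string) (dir string, inAdditionalStore bool) {
	homedir := home
	if useImageStore && imageStore != "" {
		homedir = path.Join(imageStore, driverName) // read-only layers go to the image store
	}
	newpath := path.Join(homedir, id)
	if _, err := os.Stat(newpath); err != nil {
		stores := additionalStores
		if imageStore != "" {
			stores = append([]string{imageStore}, stores...) // the image store is searched first
		}
		for _, p := range stores {
			l := path.Join(p, driverName, id)
			if _, err := os.Stat(l); err == nil {
				return l, true // found in a (read-only) additional location
			}
		}
	}
	return newpath, false
}

func main() {
	dir, ro := layerDir("/var/lib/containers/storage/overlay", "", "overlay", "abc123", false, nil)
	fmt.Println(dir, ro)
}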
 	rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps)
 	if err != nil {
 		return "", err
 	}
-	if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil {
-		return "", err
+	if len(absLowers) == 0 {
+		absLowers = append(absLowers, path.Join(dir, "empty"))
 	}
 
-	mergedDir := path.Join(workDirBase, "merged")
+	if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil {
+		if !inAdditionalStore {
+			return "", err
+		}
+		// if it is in an additional store, do not fail if the directory already exists
+		if _, err2 := os.Stat(diffDir); err2 != nil {
+			return "", err
+		}
+	}
+
+	mergedDir := path.Join(dir, "merged")
 	// Create the driver merged dir
 	if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) {
 		return "", err
@@ -1716,7 +1683,7 @@
 		}
 	}()
 
-	workdir := path.Join(workDirBase, "work")
+	workdir := path.Join(dir, "work")
 
 	if d.options.mountProgram == "" && unshare.IsRootless() {
 		optsList = append(optsList, "userxattr")
@@ -1866,7 +1833,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 
 // Put unmounts the mount path created for the given id.
 func (d *Driver) Put(id string) error {
-	dir := d.dir(id)
+	dir, _, inAdditionalStore := d.dir2(id, false)
 	if _, err := os.Stat(dir); err != nil {
 		return err
 	}
@@ -1927,11 +1894,27 @@ func (d *Driver) Put(id string) error {
 		}
 	}
 
-	if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
-		logrus.Debugf("Failed to remove mountpoint %s overlay: %s - %v", id, mountpoint, err)
-		return fmt.Errorf("removing mount point %q: %w", mountpoint, err)
-	}
+	if !inAdditionalStore {
+		uid, gid := int(0), int(0)
+		fi, err := os.Stat(mountpoint)
+		if err != nil {
+			return err
+		}
+		if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
+			uid, gid = int(stat.Uid), int(stat.Gid)
+		}
+		tmpMountpoint := path.Join(dir, "merged.1")
+		if err := idtools.MkdirAs(tmpMountpoint, 0o700, uid, gid); err != nil && !errors.Is(err, os.ErrExist) {
+			return err
+		}
+		// rename(2) can be used on an empty directory, as it is the mountpoint after umount, and it retains
+		// its atomic semantics. In this way the "merged" directory is never removed.
+		if err := unix.Rename(tmpMountpoint, mountpoint); err != nil {
+			logrus.Debugf("Failed to replace mountpoint %s overlay: %s - %v", id, mountpoint, err)
+			return fmt.Errorf("replacing mount point %q: %w", mountpoint, err)
+		}
+	}
 	return nil
 }
 
@@ -2019,8 +2002,9 @@ func (g *overlayFileGetter) Close() error {
 	return nil
 }
 
-func (d *Driver) getStagingDir() string {
-	return filepath.Join(d.home, stagingDir)
+func (d *Driver) getStagingDir(id string) string {
+	_, homedir, _ := d.dir2(id, d.imageStore != "")
+	return filepath.Join(homedir, stagingDir)
 }
 
 // DiffGetter returns a FileGetCloser that can read files from the directory that
@@ -2077,15 +2061,22 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
 	var applyDir string
 
 	if id == "" {
-		err := os.MkdirAll(d.getStagingDir(), 0o700)
+		stagingDir := d.getStagingDir(id)
+		err := os.MkdirAll(stagingDir, 0o700)
 		if err != nil && !os.IsExist(err) {
 			return graphdriver.DriverWithDifferOutput{}, err
 		}
-		applyDir, err = os.MkdirTemp(d.getStagingDir(), "")
+		applyDir, err = os.MkdirTemp(stagingDir, "")
 		if err != nil {
 			return graphdriver.DriverWithDifferOutput{}, err
 		}
-
+		perms := defaultPerms
+		if d.options.forceMask != nil {
+			perms = *d.options.forceMask
+		}
+		if err := os.Chmod(applyDir, perms); err != nil {
+			return graphdriver.DriverWithDifferOutput{}, err
+		}
 	} else {
 		var err error
 		applyDir, err = d.getDiffPath(id)
@@ -2101,6 +2092,7 @@
 	}
 	if d.usingComposefs {
 		differOptions.Format = graphdriver.DifferOutputFormatFlat
+		differOptions.UseFsVerity = graphdriver.DifferFsVerityEnabled
 	}
 	out, err := differ.ApplyDiff(applyDir, &archive.TarOptions{
 		UIDMaps: idMappings.UIDs(),
@@ -2116,27 +2108,38 @@
 }
 
 // ApplyDiffFromStagingDirectory applies the changes using the diffOutput target directory.
-func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error {
-	if filepath.Dir(stagingDirectory) != d.getStagingDir() {
+func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error {
+	stagingDirectory := diffOutput.Target
+	if filepath.Dir(stagingDirectory) != d.getStagingDir(id) {
 		return fmt.Errorf("%q is not a staging directory", stagingDirectory)
 	}
-
-	if d.usingComposefs {
-		// FIXME: move this logic into the differ so we don't have to open
-		// the file twice.
-		verityDigests, err := enableVerityRecursive(stagingDirectory)
-		if err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
-			logrus.Warningf("%s", err)
-		}
-		toc := diffOutput.Artifacts[tocArtifact]
-		if err := generateComposeFsBlob(verityDigests, toc, d.getComposefsData(id)); err != nil {
-			return err
-		}
-	}
 	diffPath, err := d.getDiffPath(id)
 	if err != nil {
 		return err
 	}
+
+	// If neither the options (forceMask) nor the layer itself (RootDirMode) specify a root directory mode, inherit the parent layer's mode.
+ if d.options.forceMask == nil && diffOutput.RootDirMode == nil && parent != "" { + parentDiffPath, err := d.getDiffPath(parent) + if err != nil { + return err + } + parentSt, err := os.Stat(parentDiffPath) + if err != nil { + return err + } + if err := os.Chmod(stagingDirectory, parentSt.Mode()); err != nil { + return err + } + } + + if d.usingComposefs { + toc := diffOutput.Artifacts[tocArtifact] + verityDigests := diffOutput.Artifacts[fsVerityDigestsArtifact].(map[string]string) + if err := generateComposeFsBlob(verityDigests, toc, d.getComposefsData(id)); err != nil { + return err + } + } if err := os.RemoveAll(diffPath); err != nil && !os.IsNotExist(err) { return err } @@ -2193,12 +2196,8 @@ func (d *Driver) getComposefsData(id string) string { } func (d *Driver) getDiffPath(id string) (string, error) { - dir, imagestore, _ := d.dir2(id) - base := dir - if imagestore != "" { - base = imagestore - } - return redirectDiffIfAdditionalLayer(path.Join(base, "diff")) + dir := d.dir(id) + return redirectDiffIfAdditionalLayer(path.Join(dir, "diff")) } func (d *Driver) getLowerDiffPaths(id string) ([]string, error) { @@ -2289,12 +2288,8 @@ func (d *Driver) AdditionalImageStores() []string { // by toContainer to those specified by toHost. func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { var err error - dir, imagestore, _ := d.dir2(id) - base := dir - if imagestore != "" { - base = imagestore - } - diffDir := filepath.Join(base, "diff") + dir := d.dir(id) + diffDir := filepath.Join(dir, "diff") rootUID, rootGID := 0, 0 if toHost != nil { diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go index 2a7a307a..d4f540c9 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go @@ -4,6 +4,7 @@ package overlay import ( + "fmt" "path" "github.com/containers/storage/pkg/directory" @@ -15,3 +16,15 @@ import ( func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { return directory.Usage(path.Join(d.dir(id), "diff")) } + +func getComposeFsHelper() (string, error) { + return "", fmt.Errorf("composefs not supported on this build") +} + +func mountComposefsBlob(dataDir, mountPoint string) error { + return fmt.Errorf("composefs not supported on this build") +} + +func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error { + return fmt.Errorf("composefs not supported on this build") +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index 599cf095..9b552254 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -31,8 +31,9 @@ func init() { func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { d := &Driver{ name: "vfs", - homes: []string{home}, + home: home, idMappings: idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + imageStore: options.ImageStore, } rootIDs := d.idMappings.RootPair() @@ -47,7 +48,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) key = strings.ToLower(key) switch key { case "vfs.imagestore", ".imagestore": - d.homes = append(d.homes, strings.Split(val, ",")...) 
+ d.additionalHomes = append(d.additionalHomes, strings.Split(val, ",")...) continue case "vfs.mountopt": return nil, fmt.Errorf("vfs driver does not support mount options") @@ -62,12 +63,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) return nil, fmt.Errorf("vfs driver does not support %s options", key) } } - // If --imagestore is provided, lets add writable graphRoot - // to vfs's additional image store, as it is done for - // `overlay` driver. - if options.ImageStore != "" { - d.homes = append(d.homes, options.ImageStore) - } + d.updater = graphdriver.NewNaiveLayerIDMapUpdater(d) d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d.updater) @@ -80,11 +76,13 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) // Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver type Driver struct { name string - homes []string + home string + additionalHomes []string idMappings *idtools.IDMappings ignoreChownErrors bool naiveDiff graphdriver.DiffDriver updater graphdriver.LayerIDMapUpdater + imageStore string } func (d *Driver) String() string { @@ -158,7 +156,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool idMappings = opts.IDMappings } - dir := d.dir(id) + dir := d.dir2(id, ro) rootIDs := idMappings.RootPair() if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0o700, rootIDs); err != nil { return err @@ -204,18 +202,32 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool return nil } -func (d *Driver) dir(id string) string { - for i, home := range d.homes { - if i > 0 { - home = filepath.Join(home, d.String()) +func (d *Driver) dir2(id string, useImageStore bool) string { + var homedir string + + if useImageStore && d.imageStore != "" { + homedir = filepath.Join(d.imageStore, d.String(), "dir", filepath.Base(id)) + } else { + homedir = filepath.Join(d.home, "dir", filepath.Base(id)) + } + if _, err := os.Stat(homedir); err != nil { + additionalHomes := d.additionalHomes[:] + if d.imageStore != "" { + additionalHomes = append(additionalHomes, d.imageStore) } - candidate := filepath.Join(home, "dir", filepath.Base(id)) - fi, err := os.Stat(candidate) - if err == nil && fi.IsDir() { - return candidate + for _, home := range additionalHomes { + candidate := filepath.Join(home, d.String(), "dir", filepath.Base(id)) + fi, err := os.Stat(candidate) + if err == nil && fi.IsDir() { + return candidate + } } } - return filepath.Join(d.homes[0], "dir", filepath.Base(id)) + return homedir +} + +func (d *Driver) dir(id string) string { + return d.dir2(id, false) } // Remove deletes the content from the directory for a given id. 
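The vfs rewrite above replaces the driver's single `homes` list with an explicit primary home, an optional read-write image store, and a list of read-only additionalHomes. A minimal sketch of the resulting lookup order, assuming illustrative paths and a helper name of our own (the vendored dir2 above is authoritative):

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    // resolveLayerDir mirrors dir2's search order: the image store (when
    // requested) or the primary home first, then any additional read-only
    // home that already contains the layer. Paths here are assumptions.
    func resolveLayerDir(home, imageStore string, additionalHomes []string, id string, useImageStore bool) string {
    	primary := filepath.Join(home, "dir", filepath.Base(id))
    	if useImageStore && imageStore != "" {
    		primary = filepath.Join(imageStore, "vfs", "dir", filepath.Base(id))
    	}
    	if _, err := os.Stat(primary); err == nil {
    		return primary
    	}
    	// As in dir2, the image store is also searched as a fallback home.
    	candidates := append(append([]string{}, additionalHomes...), imageStore)
    	for _, h := range candidates {
    		if h == "" {
    			continue
    		}
    		candidate := filepath.Join(h, "vfs", "dir", filepath.Base(id))
    		if fi, err := os.Stat(candidate); err == nil && fi.IsDir() {
    			return candidate
    		}
    	}
    	return primary // fall back to the primary location, as dir2 does
    }

    func main() {
    	fmt.Println(resolveLayerDir("/var/lib/containers/storage/vfs", "", nil, "layer1", false))
    }

Note that only the primary home omits the driver-name path segment; the image store and additional homes are addressed as <home>/vfs/dir/<id>, matching the code above.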
@@ -263,7 +275,7 @@ func (d *Driver) Exists(id string) bool { // List layers (not including additional image stores) func (d *Driver) ListLayers() ([]string, error) { - entries, err := os.ReadDir(filepath.Join(d.homes[0], "dir")) + entries, err := os.ReadDir(filepath.Join(d.home, "dir")) if err != nil { return nil, err } @@ -285,8 +297,8 @@ func (d *Driver) ListLayers() ([]string, error) { // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { - if len(d.homes) > 1 { - return d.homes[1:] + if len(d.additionalHomes) > 0 { + return d.additionalHomes } return nil } diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index 8dfce5e3..f1325262 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -181,6 +181,13 @@ type DiffOptions struct { Compression *archive.Compression } +// stagedLayerOptions are the options passed to .create to populate a staged +// layer +type stagedLayerOptions struct { + DiffOutput *drivers.DriverWithDifferOutput + DiffOptions *drivers.ApplyDiffWithDifferOpts +} + // roLayerStore wraps a graph driver, adding the ability to refer to layers by // name, and keeping track of parent-child relationships, along with a list of // all known layers. @@ -267,7 +274,7 @@ type rwLayerStore interface { // underlying drivers do not themselves distinguish between writeable // and read-only layers. Returns the new layer structure and the size of the // diff which was applied to its parent to initialize its contents. - create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (*Layer, int64, error) + create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error) // updateNames modifies names associated with a layer based on (op, names). updateNames(id string, names []string, op updateNameOperation) error @@ -312,8 +319,8 @@ type rwLayerStore interface { // CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors CleanupStagingDirectory(stagingDirectory string) error - // ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff. - ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error + // applyDiffFromStagingDirectory uses diffOutput.Target to create the diff. + applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error // DifferTarget gets the location where files are stored for the layer. 
DifferTarget(id string) (string, error) @@ -327,10 +334,71 @@ type rwLayerStore interface { GarbageCollect() error } +type multipleLockFile struct { + lockfiles []*lockfile.LockFile +} + +func (l multipleLockFile) Lock() { + for _, lock := range l.lockfiles { + lock.Lock() + } +} + +func (l multipleLockFile) RLock() { + for _, lock := range l.lockfiles { + lock.RLock() + } +} + +func (l multipleLockFile) Unlock() { + for _, lock := range l.lockfiles { + lock.Unlock() + } +} + +func (l multipleLockFile) ModifiedSince(lastWrite lockfile.LastWrite) (lockfile.LastWrite, bool, error) { + // Look up only the first lockfile, since this is the value returned by RecordWrite(). + return l.lockfiles[0].ModifiedSince(lastWrite) +} + +func (l multipleLockFile) AssertLockedForWriting() { + for _, lock := range l.lockfiles { + lock.AssertLockedForWriting() + } +} + +func (l multipleLockFile) GetLastWrite() (lockfile.LastWrite, error) { + return l.lockfiles[0].GetLastWrite() +} + +func (l multipleLockFile) RecordWrite() (lockfile.LastWrite, error) { + var lastWrite *lockfile.LastWrite + for _, lock := range l.lockfiles { + lw, err := lock.RecordWrite() + if err != nil { + return lw, err + } + // Return the first value we get so we know that + // all the locks have a write time >= to this one. + if lastWrite == nil { + lastWrite = &lw + } + } + return *lastWrite, nil +} + +func (l multipleLockFile) IsReadWrite() bool { + return l.lockfiles[0].IsReadWrite() +} + +func newMultipleLockFile(l ...*lockfile.LockFile) *multipleLockFile { + return &multipleLockFile{lockfiles: l} +} + type layerStore struct { // The following fields are only set when constructing layerStore, and must never be modified afterwards. // They are safe to access without any other locking. - lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores. + lockfile *multipleLockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores. mountsLockfile *lockfile.LockFile // Can _only_ be obtained with inProcessLock held. rundir string jsonPath [numLayerLocationIndex]string @@ -1016,22 +1084,37 @@ func (r *layerStore) saveMounts() error { return r.loadMounts() } -func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver, transient bool) (rwLayerStore, error) { +func (s *store) newLayerStore(rundir, layerdir, imagedir string, driver drivers.Driver, transient bool) (rwLayerStore, error) { if err := os.MkdirAll(rundir, 0o700); err != nil { return nil, err } if err := os.MkdirAll(layerdir, 0o700); err != nil { return nil, err } + if imagedir != "" { + if err := os.MkdirAll(imagedir, 0o700); err != nil { + return nil, err + } + } // Note: While the containers.lock file is in rundir for transient stores // we don't want to do this here, because the non-transient layers in // layers.json might be used externally as a read-only layer (using e.g. 
// additionalimagestores), and that would look for the lockfile in the // same directory + var lockFiles []*lockfile.LockFile lockFile, err := lockfile.GetLockFile(filepath.Join(layerdir, "layers.lock")) if err != nil { return nil, err } + lockFiles = append(lockFiles, lockFile) + if imagedir != "" { + lockFile, err := lockfile.GetLockFile(filepath.Join(imagedir, "layers.lock")) + if err != nil { + return nil, err + } + lockFiles = append(lockFiles, lockFile) + } + mountsLockfile, err := lockfile.GetLockFile(filepath.Join(rundir, "mountpoints.lock")) if err != nil { return nil, err @@ -1041,7 +1124,7 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri volatileDir = rundir } rlstore := layerStore{ - lockfile: lockFile, + lockfile: newMultipleLockFile(lockFiles...), mountsLockfile: mountsLockfile, rundir: rundir, jsonPath: [numLayerLocationIndex]string{ @@ -1078,7 +1161,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL return nil, err } rlstore := layerStore{ - lockfile: lockfile, + lockfile: newMultipleLockFile(lockfile), mountsLockfile: nil, rundir: rundir, jsonPath: [numLayerLocationIndex]string{ @@ -1232,7 +1315,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s } // Requires startWriting. -func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (layer *Layer, size int64, err error) { +func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (layer *Layer, size int64, err error) { if moreOptions == nil { moreOptions = &LayerOptions{} } @@ -1426,6 +1509,11 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount cleanupFailureContext = "applying layer diff" return nil, -1, err } + } else if slo != nil { + if err := r.applyDiffFromStagingDirectory(layer.ID, slo.DiffOutput, slo.DiffOptions); err != nil { + cleanupFailureContext = "applying staged directory diff" + return nil, -1, err + } } else { // applyDiffWithOptions() would have updated r.bycompressedsum // and r.byuncompressedsum for us, but if we used a template @@ -2286,7 +2374,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, if layerOptions != nil && layerOptions.UncompressedDigest != "" && layerOptions.UncompressedDigest.Algorithm() == digest.Canonical { uncompressedDigest = layerOptions.UncompressedDigest - } else { + } else if compression != archive.Uncompressed { uncompressedDigester = digest.Canonical.Digester() } @@ -2365,10 +2453,17 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, if uncompressedDigester != nil { uncompressedDigest = uncompressedDigester.Digest() } + if uncompressedDigest == "" && compression == archive.Uncompressed { + uncompressedDigest = compressedDigest + } updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest, layer.ID) layer.CompressedDigest = compressedDigest - layer.CompressedSize = compressedCounter.Count + if layerOptions != nil && layerOptions.OriginalDigest != "" && layerOptions.OriginalSize != nil { + layer.CompressedSize = *layerOptions.OriginalSize + } else { + layer.CompressedSize = compressedCounter.Count + } updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest, layer.ID) 
layer.UncompressedDigest = uncompressedDigest layer.UncompressedSize = uncompressedCounter.Count @@ -2407,7 +2502,7 @@ func (r *layerStore) DifferTarget(id string) (string, error) { } // Requires startWriting. -func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error { +func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error { ddriver, ok := r.driver.(drivers.DriverWithDiffer) if !ok { return ErrNotSupported @@ -2426,7 +2521,7 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, } } - err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, stagingDirectory, diffOutput, options) + err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, diffOutput, options) if err != nil { return err } @@ -2446,6 +2541,10 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, layer.Flags[k] = v } } + if err = r.saveFor(layer); err != nil { + return err + } + if len(diffOutput.TarSplit) != 0 { tsdata := bytes.Buffer{} compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed) @@ -2475,9 +2574,6 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, return err } } - if err = r.saveFor(layer); err != nil { - return err - } return err } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 51d0a66e..70f76d66 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -339,12 +339,43 @@ func (compression *Compression) Extension() string { return "" } +// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to +// prevent tar.FileInfoHeader from introspecting it and potentially calling into +// glibc. +type nosysFileInfo struct { + os.FileInfo +} + +func (fi nosysFileInfo) Sys() interface{} { + // A Sys value of type *tar.Header is safe as it is system-independent. + // The tar.FileInfoHeader function copies the fields into the returned + // header without performing any OS lookups. + if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok { + return sys + } + return nil +} + +// sysStatOverride, if non-nil, populates hdr from system-dependent fields of fi. +var sysStatOverride func(fi os.FileInfo, hdr *tar.Header) error + +func fileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { + if sysStatOverride == nil { + return tar.FileInfoHeader(fi, link) + } + hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link) + if err != nil { + return nil, err + } + return hdr, sysStatOverride(fi, hdr) +} + // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. 
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := tar.FileInfoHeader(fi, link) + hdr, err := fileInfoHeaderNoLookups(fi, link) if err != nil { return nil, err } @@ -385,7 +416,7 @@ func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error { return err } for _, key := range xattrs { - if strings.HasPrefix(key, "user.") { + if strings.HasPrefix(key, "user.") && !strings.HasPrefix(key, "user.overlay.") { value, err := system.Lgetxattr(path, key) if err != nil { if errors.Is(err, system.E2BIG) { diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go index 88192f22..c6811031 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go @@ -15,6 +15,31 @@ import ( "golang.org/x/sys/unix" ) +func init() { + sysStatOverride = statUnix +} + +// statUnix populates hdr from system-dependent fields of fi without performing +// any OS lookups. +// Adapted from Moby. +func statUnix(fi os.FileInfo, hdr *tar.Header) error { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + + hdr.Uid = int(s.Uid) + hdr.Gid = int(s.Gid) + + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert + } + + return nil +} + // fixVolumePathPrefix does platform specific processing to ensure that if // the path being passed in is not in a volume path format, convert it to one. func fixVolumePathPrefix(srcPath string) string { diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go index aa4f57e6..1e3ad86d 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -25,7 +25,7 @@ import ( const ( cacheKey = "chunked-manifest-cache" - cacheVersion = 1 + cacheVersion = 2 digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ) @@ -207,9 +207,9 @@ func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) { return string(digester.Digest()), nil } -// generateFileLocation generates a file location in the form $OFFSET@$PATH -func generateFileLocation(path string, offset uint64) []byte { - return []byte(fmt.Sprintf("%d@%s", offset, path)) +// generateFileLocation generates a file location in the form $OFFSET:$LEN:$PATH +func generateFileLocation(path string, offset, len uint64) []byte { + return []byte(fmt.Sprintf("%d:%d:%s", offset, len, path)) } // generateTag generates a tag in the form $DIGEST$OFFSET@LEN. 
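cacheVersion is bumped to 2 above precisely because this on-disk encoding changed: file locations move from $OFFSET@$PATH to $OFFSET:$LEN:$PATH, while tags keep the $DIGEST$OFFSET@LEN shape. A small round-trip sketch of the new location format (helper names are ours, not the vendored API):

    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"
    )

    // encodeLocation follows generateFileLocation above: $OFFSET:$LEN:$PATH.
    func encodeLocation(path string, offset, length uint64) string {
    	return fmt.Sprintf("%d:%d:%s", offset, length, path)
    }

    // decodeLocation splits a location back apart; entries still in the old
    // two-part $OFFSET@$PATH form do not yield three fields and are rejected,
    // matching the defensive len(parts) != 3 check in findDigestInternal.
    func decodeLocation(loc string) (path string, offset, length uint64, err error) {
    	parts := strings.SplitN(loc, ":", 3)
    	if len(parts) != 3 {
    		return "", 0, 0, fmt.Errorf("malformed location %q", loc)
    	}
    	offset, err = strconv.ParseUint(parts[0], 10, 64)
    	if err != nil {
    		return "", 0, 0, err
    	}
    	length, err = strconv.ParseUint(parts[1], 10, 64)
    	if err != nil {
    		return "", 0, 0, err
    	}
    	return parts[2], offset, length, nil
    }

    func main() {
    	loc := encodeLocation("usr/bin/sh", 512, 145824)
    	path, off, l, err := decodeLocation(loc)
    	fmt.Println(loc, path, off, l, err) // 512:145824:usr/bin/sh usr/bin/sh 512 145824 <nil>
    }
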
@@ -245,7 +245,7 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin var tags []string for _, k := range toc { if k.Digest != "" { - location := generateFileLocation(k.Name, 0) + location := generateFileLocation(k.Name, 0, uint64(k.Size)) off := uint64(vdata.Len()) l := uint64(len(location)) @@ -276,7 +276,7 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin digestLen = len(k.Digest) } if k.ChunkDigest != "" { - location := generateFileLocation(k.Name, uint64(k.ChunkOffset)) + location := generateFileLocation(k.Name, uint64(k.ChunkOffset), uint64(k.ChunkSize)) off := uint64(vdata.Len()) l := uint64(len(location)) d := generateTag(k.ChunkDigest, off, l) @@ -490,7 +490,9 @@ func findTag(digest string, metadata *metadata) (string, uint64, uint64) { if digest == d { startOff := i*metadata.tagLen + metadata.digestLen parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@") + off, _ := strconv.ParseInt(parts[0], 10, 64) + len, _ := strconv.ParseInt(parts[1], 10, 64) return digest, uint64(off), uint64(len) } @@ -507,12 +509,16 @@ func (c *layersCache) findDigestInternal(digest string) (string, string, int64, defer c.mutex.RUnlock() for _, layer := range c.layers { - digest, off, len := findTag(digest, layer.metadata) + digest, off, tagLen := findTag(digest, layer.metadata) if digest != "" { - position := string(layer.metadata.vdata[off : off+len]) - parts := strings.SplitN(position, "@", 2) + position := string(layer.metadata.vdata[off : off+tagLen]) + parts := strings.SplitN(position, ":", 3) + if len(parts) != 3 { + continue + } offFile, _ := strconv.ParseInt(parts[0], 10, 64) - return layer.target, parts[1], offFile, nil + // parts[1] is the chunk length, currently unused. + return layer.target, parts[2], offFile, nil } } diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go index 1d8141e3..112ca2c7 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go @@ -257,8 +257,8 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann return decodedBlob, decodedTarSplit, int64(footerData.Offset), err } -func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedUncompressedChecksum string) ([]byte, error) { - d, err := digest.Parse(expectedUncompressedChecksum) +func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) { + d, err := digest.Parse(expectedCompressedChecksum) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go index 5e569682..d3c105c4 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go +++ b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go @@ -4,6 +4,7 @@ import ( "bufio" "fmt" "io" + "path/filepath" "strings" "time" "unicode" @@ -93,13 +94,18 @@ func getStMode(mode uint32, typ string) (uint32, error) { return mode, nil } -func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error { - path := strings.TrimRight(entry.Name, "/") - if path == "" { +func sanitizeName(name string) string { + path := filepath.Clean(name) + if path == "." 
{ path = "/" } else if path[0] != '/' { path = "/" + path } + return path +} + +func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error { + path := sanitizeName(entry.Name) if _, err := fmt.Fprint(out, escaped(path, ESCAPE_STANDARD)); err != nil { return err @@ -133,9 +139,10 @@ func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]stri var payload string if entry.Linkname != "" { - payload = entry.Linkname - if entry.Type == internal.TypeLink && payload[0] != '/' { - payload = "/" + payload + if entry.Type == internal.TypeSymlink { + payload = entry.Linkname + } else { + payload = sanitizeName(entry.Linkname) } } else { if len(entry.Digest) > 10 { @@ -198,10 +205,13 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, if e.Linkname == "" { continue } + if e.Type == internal.TypeSymlink { + continue + } links[e.Linkname] = links[e.Linkname] + 1 } - if len(toc.Entries) == 0 || (toc.Entries[0].Name != "" && toc.Entries[0].Name != "/") { + if len(toc.Entries) == 0 || (sanitizeName(toc.Entries[0].Name) != "/") { root := &internal.FileMetadata{ Name: "/", Type: internal.TypeDir, diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index 8c54a5ae..048889c3 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -25,6 +25,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chunked/compressor" "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/fsverity" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" "github.com/containers/storage/types" @@ -46,6 +47,7 @@ const ( chunkedData = "zstd-chunked-data" chunkedLayerDataKey = "zstd-chunked-layer-data" tocKey = "toc" + fsVerityDigestsKey = "fs-verity-digests" fileTypeZstdChunked = iota fileTypeEstargz @@ -71,11 +73,9 @@ type chunkedDiffer struct { zstdReader *zstd.Decoder rawReader io.Reader - // contentDigest is the digest of the uncompressed content - // (diffID) when the layer is fully retrieved. If the layer - // is not fully retrieved, instead of using the digest of the - // uncompressed content, it refers to the digest of the TOC. - contentDigest digest.Digest + // tocDigest is the digest of the TOC document when the layer + // is partially pulled. 
+ tocDigest digest.Digest // convertedToZstdChunked is set to true if the layer needs to // be converted to the zstd:chunked format before it can be @@ -94,6 +94,10 @@ type chunkedDiffer struct { blobSize int64 storeOpts *types.StoreOptions + + useFsVerity graphdriver.DifferFsVerity + fsVerityDigests map[string]string + fsVerityMutex sync.Mutex } var xattrsToIgnore = map[string]interface{}{ @@ -237,6 +241,10 @@ func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Diges return nil, err } + if !parseBooleanPullOption(&storeOpts, "enable_partial_images", true) { + return nil, errors.New("enable_partial_images not configured") + } + _, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey] _, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation] @@ -265,6 +273,7 @@ func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDige } return &chunkedDiffer{ + fsVerityDigests: make(map[string]string), blobDigest: blobDigest, blobSize: blobSize, convertToZstdChunked: true, @@ -285,22 +294,23 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in return nil, err } - contentDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey]) + tocDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey]) if err != nil { return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[internal.ManifestChecksumKey], err) } return &chunkedDiffer{ - blobSize: blobSize, - contentDigest: contentDigest, - copyBuffer: makeCopyBuffer(), - fileType: fileTypeZstdChunked, - layersCache: layersCache, - manifest: manifest, - storeOpts: storeOpts, - stream: iss, - tarSplit: tarSplit, - tocOffset: tocOffset, + fsVerityDigests: make(map[string]string), + blobSize: blobSize, + tocDigest: tocDigest, + copyBuffer: makeCopyBuffer(), + fileType: fileTypeZstdChunked, + layersCache: layersCache, + manifest: manifest, + storeOpts: storeOpts, + stream: iss, + tarSplit: tarSplit, + tocOffset: tocOffset, }, nil } @@ -314,21 +324,22 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize return nil, err } - contentDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) + tocDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) if err != nil { return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[estargz.TOCJSONDigestAnnotation], err) } return &chunkedDiffer{ - blobSize: blobSize, - contentDigest: contentDigest, - copyBuffer: makeCopyBuffer(), - fileType: fileTypeEstargz, - layersCache: layersCache, - manifest: manifest, - storeOpts: storeOpts, - stream: iss, - tocOffset: tocOffset, + fsVerityDigests: make(map[string]string), + blobSize: blobSize, + tocDigest: tocDigest, + copyBuffer: makeCopyBuffer(), + fileType: fileTypeEstargz, + layersCache: layersCache, + manifest: manifest, + storeOpts: storeOpts, + stream: iss, + tocOffset: tocOffset, }, nil } @@ -925,6 +936,8 @@ func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileT return nil } +type recordFsVerityFunc func(string, *os.File) error + type destinationFile struct { digester digest.Digester dirfd int @@ -934,9 +947,10 @@ type destinationFile struct { options *archive.TarOptions skipValidation bool to io.Writer + recordFsVerity recordFsVerityFunc } -func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions, skipValidation bool) (*destinationFile, error) { +func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options 
*archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc) (*destinationFile, error) { file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0) if err != nil { return nil, err @@ -963,15 +977,32 @@ func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *ar options: options, dirfd: dirfd, skipValidation: skipValidation, + recordFsVerity: recordFsVerity, }, nil } func (d *destinationFile) Close() (Err error) { defer func() { - err := d.file.Close() + var roFile *os.File + var err error + + if d.recordFsVerity != nil { + roFile, err = reopenFileReadOnly(d.file) + if err == nil { + defer roFile.Close() + } else if Err == nil { + Err = err + } + } + + err = d.file.Close() if Err == nil { Err = err } + + if Err == nil && roFile != nil { + Err = d.recordFsVerity(d.metadata.Name, roFile) + } }() if !d.skipValidation { @@ -994,6 +1025,35 @@ func closeDestinationFiles(files chan *destinationFile, errors chan error) { close(errors) } +func (c *chunkedDiffer) recordFsVerity(path string, roFile *os.File) error { + if c.useFsVerity == graphdriver.DifferFsVerityDisabled { + return nil + } + // fsverity.EnableVerity doesn't return an error if fs-verity was already + // enabled on the file. + err := fsverity.EnableVerity(path, int(roFile.Fd())) + if err != nil { + if c.useFsVerity == graphdriver.DifferFsVerityRequired { + return err + } + + // If it is not required, ignore the error if the filesystem does not support it. + if errors.Is(err, unix.ENOTSUP) || errors.Is(err, unix.ENOTTY) { + return nil + } + } + verity, err := fsverity.MeasureVerity(path, int(roFile.Fd())) + if err != nil { + return err + } + + c.fsVerityMutex.Lock() + c.fsVerityDigests[path] = verity + c.fsVerityMutex.Unlock() + + return nil +} + func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) { var destFile *destinationFile @@ -1081,7 +1141,11 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan } filesToClose <- destFile } - destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation) + recordFsVerity := c.recordFsVerity + if c.useFsVerity == graphdriver.DifferFsVerityDisabled { + recordFsVerity = nil + } + destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation, recordFsVerity) if err != nil { Err = err goto exit @@ -1412,15 +1476,39 @@ type findAndCopyFileOptions struct { options *archive.TarOptions } +func reopenFileReadOnly(f *os.File) (*os.File, error) { + path := fmt.Sprintf("/proc/self/fd/%d", f.Fd()) + fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), f.Name()), nil +} + func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) { finalizeFile := func(dstFile *os.File) error { - if dstFile != nil { - defer dstFile.Close() - if err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false); err != nil { - return err - } + if dstFile == nil { + return nil } - return nil + err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false) + if err != nil { + dstFile.Close() + return err + } + var roFile *os.File + if c.useFsVerity != graphdriver.DifferFsVerityDisabled { + roFile, err = reopenFileReadOnly(dstFile) + } + dstFile.Close() + if err != nil { + return err + } + if roFile == nil { + return nil 
+ } + + defer roFile.Close() + return c.recordFsVerity(r.Name, roFile) } found, dstFile, _, err := findFileInOtherLayers(c.layersCache, r, dirfd, copyOptions.useHardLinks) @@ -1522,9 +1610,13 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } }() + c.useFsVerity = differOpts.UseFsVerity + // stream to use for reading the zstd:chunked or Estargz file. stream := c.stream + var uncompressedDigest digest.Digest + if c.convertToZstdChunked { fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600) if err != nil { @@ -1575,13 +1667,12 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff c.fileType = fileTypeZstdChunked c.manifest = manifest c.tarSplit = tarSplit - - // since we retrieved the whole file and it was validated, use the diffID instead of the TOC digest. - c.contentDigest = diffID c.tocOffset = tocOffset // the file was generated by us and the digest for each file was already computed, no need to validate it again. c.skipValidation = true + // since we retrieved the whole file and it was validated, set the uncompressed digest. + uncompressedDigest = diffID } lcd := chunkedLayerData{ @@ -1610,11 +1701,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff Artifacts: map[string]interface{}{ tocKey: toc, }, - TOCDigest: c.contentDigest, - } - - if !parseBooleanPullOption(c.storeOpts, "enable_partial_images", false) { - return output, errors.New("enable_partial_images not configured") + TOCDigest: c.tocDigest, + UncompressedDigest: uncompressedDigest, } // When the hard links deduplication is used, file attributes are ignored because setting them @@ -1731,13 +1819,17 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff mode := os.FileMode(r.Mode) - r.Name = filepath.Clean(r.Name) - r.Linkname = filepath.Clean(r.Linkname) - t, err := typeToTarType(r.Type) if err != nil { return output, err } + + r.Name = filepath.Clean(r.Name) + // do not modify the value of symlinks + if r.Linkname != "" && t != tar.TypeSymlink { + r.Linkname = filepath.Clean(r.Linkname) + } + if whiteoutConverter != nil { hdr := archivetar.Header{ Typeflag: t, @@ -1783,6 +1875,9 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } case tar.TypeDir: + if r.Name == "" || r.Name == "." 
{
+			output.RootDirMode = &mode
+		}
 		if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil {
 			return output, err
 		}
@@ -1920,6 +2015,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
 	}
 
+	output.Artifacts[fsVerityDigestsKey] = c.fsVerityDigests
+
 	return output, nil
 }
 
@@ -1979,7 +2076,10 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
 			e.Chunks = make([]*internal.FileMetadata, nChunks+1)
 			for j := 0; j <= nChunks; j++ {
-				e.Chunks[j] = &entries[i+j]
+				// we need a copy here, otherwise we override the
+				// .Size later
+				copy := entries[i+j]
+				e.Chunks[j] = &copy
 				e.EndOffset = entries[i+j].EndOffset
 			}
 			i += nChunks
diff --git a/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go b/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go
index 9cfd97d8..6fbaa41b 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go
@@ -1,6 +1,8 @@
 package toc
 
 import (
+	"errors"
+
 	"github.com/containers/storage/pkg/chunked/internal"
 	digest "github.com/opencontainers/go-digest"
 )
@@ -16,19 +18,24 @@ const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
 // table of contents (TOC) from the image's annotations.
 // This is an experimental feature and may be changed/removed in the future.
 func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
-	if contentDigest, ok := annotations[tocJSONDigestAnnotation]; ok {
-		d, err := digest.Parse(contentDigest)
+	d1, ok1 := annotations[tocJSONDigestAnnotation]
+	d2, ok2 := annotations[internal.ManifestChecksumKey]
+	switch {
+	case ok1 && ok2:
+		return nil, errors.New("both zstd:chunked and eStargz TOC found")
+	case ok1:
+		d, err := digest.Parse(d1)
 		if err != nil {
 			return nil, err
 		}
 		return &d, nil
-	}
-	if contentDigest, ok := annotations[internal.ManifestChecksumKey]; ok {
-		d, err := digest.Parse(contentDigest)
+	case ok2:
+		d, err := digest.Parse(d2)
 		if err != nil {
 			return nil, err
 		}
 		return &d, nil
+	default:
+		return nil, nil
 	}
-	return nil, nil
 }
diff --git a/vendor/github.com/containers/storage/pkg/fsverity/fsverity_linux.go b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_linux.go
new file mode 100644
index 00000000..5b21c4b7
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_linux.go
@@ -0,0 +1,45 @@
+package fsverity
+
+import (
+	"errors"
+	"fmt"
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// verityDigest struct represents the digest used for verifying the integrity of a file.
+type verityDigest struct {
+	Fsv unix.FsverityDigest
+	Buf [64]byte
+}
+
+// EnableVerity enables the verity feature on a file represented by the file descriptor 'fd'. The file must be opened
+// in read-only mode.
+// The 'description' parameter is a human-readable description of the file.
+func EnableVerity(description string, fd int) error { + enableArg := unix.FsverityEnableArg{ + Version: 1, + Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256, + Block_size: 4096, + } + + _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg))) + if e1 != 0 && !errors.Is(e1, unix.EEXIST) { + return fmt.Errorf("failed to enable verity for %q: %w", description, e1) + } + return nil +} + +// MeasureVerity measures and returns the verity digest for the file represented by 'fd'. +// The 'description' parameter is a human-readable description of the file. +func MeasureVerity(description string, fd int) (string, error) { + var digest verityDigest + digest.Fsv.Size = 64 + _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest))) + if e1 != 0 { + return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1) + } + return fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil +} diff --git a/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go new file mode 100644 index 00000000..46e68c57 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go @@ -0,0 +1,21 @@ +//go:build !linux +// +build !linux + +package fsverity + +import ( + "fmt" +) + +// EnableVerity enables the verity feature on a file represented by the file descriptor 'fd'. The file must be opened +// in read-only mode. +// The 'description' parameter is a human-readable description of the file. +func EnableVerity(description string, fd int) error { + return fmt.Errorf("fs-verity is not supported on this platform") +} + +// MeasureVerity measures and returns the verity digest for the file represented by 'fd'. +// The 'description' parameter is a human-readable description of the file. +func MeasureVerity(description string, fd int) (string, error) { + return "", fmt.Errorf("fs-verity is not supported on this platform") +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go index b02812e6..9057fe1b 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go @@ -1,5 +1,5 @@ -//go:build !linux && !darwin && !freebsd -// +build !linux,!darwin,!freebsd +//go:build !linux && !darwin && !freebsd && !windows +// +build !linux,!darwin,!freebsd,!windows package homedir diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go index af65f2c0..a76610f9 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go @@ -5,6 +5,7 @@ package homedir import ( "os" + "path/filepath" ) // Key returns the env var name for the user's home dir based on @@ -25,8 +26,36 @@ func Get() string { return home } +// GetConfigHome returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. 
+func GetConfigHome() (string, error) { + return filepath.Join(Get(), ".config"), nil +} + // GetShortcutString returns the string that is shortcut to user's home directory // in the native shell of the platform running on. func GetShortcutString() string { return "%USERPROFILE%" // be careful while using in format functions } + +// StickRuntimeDirContents is a no-op on Windows +func StickRuntimeDirContents(files []string) ([]string, error) { + return nil, nil +} + +// GetRuntimeDir returns a directory suitable to store runtime files. +// The function will try to use the XDG_RUNTIME_DIR env variable if it is set. +// XDG_RUNTIME_DIR is typically configured via pam_systemd. +// If XDG_RUNTIME_DIR is not set, GetRuntimeDir will try to find a suitable +// directory for the current user. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetRuntimeDir() (string, error) { + data, err := GetDataHome() + if err != nil { + return "", err + } + runtimeDir := filepath.Join(data, "containers", "storage") + return runtimeDir, nil +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go index 4701dc5a..d7cb4ac2 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -14,7 +14,7 @@ import ( "syscall" "github.com/containers/storage/pkg/system" - "github.com/opencontainers/runc/libcontainer/user" + "github.com/moby/sys/user" ) var ( diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index d91fa98f..924e8f13 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -59,7 +59,7 @@ additionalimagestores = [ # can deduplicate pulling of content, disk storage of content and can allow the # kernel to use less memory when running containers. -# containers/storage supports three keys +# containers/storage supports four keys # * enable_partial_images="true" | "false" # Tells containers/storage to look for files previously pulled in storage # rather then always pulling them from the container registry. @@ -70,7 +70,12 @@ additionalimagestores = [ # Tells containers/storage where an ostree repository exists that might have # previously pulled content which can be used when attempting to avoid # pulling content from the container registry -pull_options = {enable_partial_images = "false", use_hard_links = "false", ostree_repos=""} +# * convert_images = "false" | "true" +# If set to true, containers/storage will convert images to a +# format compatible with partial pulls in order to take advantage +# of local deduplication and hard linking. It is an expensive +# operation so it is not enabled by default. 
+pull_options = {enable_partial_images = "true", use_hard_links = "false", ostree_repos=""} # Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of # a container, to the UIDs/GIDs as they should appear outside of the container, diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 49a4ff11..c6f12518 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -71,6 +71,19 @@ type metadataStore interface { rwMetadataStore } +// ApplyStagedLayerOptions contains options to pass to ApplyStagedLayer +type ApplyStagedLayerOptions struct { + ID string // Mandatory + ParentLayer string // Optional + Names []string // Optional + MountLabel string // Optional + Writeable bool // Optional + LayerOptions *LayerOptions // Optional + + DiffOutput *drivers.DriverWithDifferOutput // Mandatory + DiffOptions *drivers.ApplyDiffWithDifferOpts // Mandatory +} + // An roBigDataStore wraps up the read-only big-data related methods of the // various types of file-based lookaside stores that we implement. type roBigDataStore interface { @@ -318,11 +331,21 @@ type Store interface { ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) // ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff. + // Deprecated: it will be removed soon. Use ApplyStagedLayer instead. ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error // CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors + // Deprecated: it will be removed soon. Use CleanupStagedLayer instead. CleanupStagingDirectory(stagingDirectory string) error + // ApplyStagedLayer combines the functions of CreateLayer and ApplyDiffFromStagingDirectory, + // marking the layer for automatic removal if applying the diff fails + // for any reason. + ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) + + // CleanupStagedLayer cleanups the staging directory. It can be used to cleanup the staging directory on errors + CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error + // DifferTarget gets the path to the differ target. DifferTarget(id string) (string, error) @@ -397,6 +420,18 @@ type Store interface { // allow ImagesByDigest to find images by their correct digests. SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error + // ImageDirectory returns a path of a directory which the caller can + // use to store data, specific to the image, which the library does not + // directly manage. The directory will be deleted when the image is + // deleted. + ImageDirectory(id string) (string, error) + + // ImageRunDirectory returns a path of a directory which the caller can + // use to store data, specific to the image, which the library does not + // directly manage. The directory will be deleted when the host system + // is restarted. + ImageRunDirectory(id string) (string, error) + // ListLayerBigData retrieves a list of the (possibly large) chunks of // named data associated with a layer. ListLayerBigData(id string) ([]string, error) @@ -568,10 +603,19 @@ type LayerOptions struct { // initialize this layer. If set, it should be a child of the layer // which we want to use as the parent of the new layer. 
TemplateLayer string - // OriginalDigest specifies a digest of the tarstream (diff), if one is + // OriginalDigest specifies a digest of the (possibly-compressed) tarstream (diff), if one is // provided along with these LayerOptions, and reliably known by the caller. + // The digest might not be exactly the digest of the provided tarstream + // (e.g. the digest might be of a compressed representation, while providing + // an uncompressed one); in that case the caller is responsible for the two matching. // Use the default "" if this fields is not applicable or the value is not known. OriginalDigest digest.Digest + // OriginalSize specifies a size of the (possibly-compressed) tarstream corresponding + // to OriginalDigest. + // If the digest does not match the provided tarstream, OriginalSize must match OriginalDigest, + // not the tarstream. + // Use nil if not applicable or not known. + OriginalSize *int64 // UncompressedDigest specifies a digest of the uncompressed version (“DiffID”) // of the tarstream (diff), if one is provided along with these LayerOptions, // and reliably known by the caller. @@ -928,11 +972,13 @@ func (s *store) load() error { if err := os.MkdirAll(gipath, 0o700); err != nil { return err } - ris, err := newImageStore(gipath) + imageStore, err := newImageStore(gipath) if err != nil { return err } - s.imageStore = ris + s.imageStore = imageStore + + s.rwImageStores = []rwImageStore{imageStore} gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers") if err := os.MkdirAll(gcpath, 0o700); err != nil { @@ -950,13 +996,16 @@ func (s *store) load() error { s.containerStore = rcs - for _, store := range driver.AdditionalImageStores() { + additionalImageStores := s.graphDriver.AdditionalImageStores() + if s.imageStoreDir != "" { + additionalImageStores = append([]string{s.graphRoot}, additionalImageStores...) + } + + for _, store := range additionalImageStores { gipath := filepath.Join(store, driverPrefix+"images") var ris roImageStore - if s.imageStoreDir != "" && store == s.graphRoot { - // If --imagestore was set and current store - // is `graphRoot` then mount it as a `rw` additional - // store instead of `readonly` additional store. + // both the graphdriver and the imagestore must be used read-write. + if store == s.imageStoreDir || store == s.graphRoot { imageStore, err := newImageStore(gipath) if err != nil { return err @@ -1041,15 +1090,9 @@ func (s *store) stopUsingGraphDriver() { // Almost all users should use startUsingGraphDriver instead. // The caller must hold s.graphLock. 
func (s *store) createGraphDriverLocked() (drivers.Driver, error) { - driverRoot := s.imageStoreDir - imageStoreBase := s.graphRoot - if driverRoot == "" { - driverRoot = s.graphRoot - imageStoreBase = "" - } config := drivers.Options{ - Root: driverRoot, - ImageStore: imageStoreBase, + Root: s.graphRoot, + ImageStore: s.imageStoreDir, RunRoot: s.runRoot, DriverPriority: s.graphDriverPriority, DriverOptions: s.graphOptions, @@ -1079,15 +1122,15 @@ func (s *store) getLayerStoreLocked() (rwLayerStore, error) { if err := os.MkdirAll(rlpath, 0o700); err != nil { return nil, err } - imgStoreRoot := s.imageStoreDir - if imgStoreRoot == "" { - imgStoreRoot = s.graphRoot - } - glpath := filepath.Join(imgStoreRoot, driverPrefix+"layers") + glpath := filepath.Join(s.graphRoot, driverPrefix+"layers") if err := os.MkdirAll(glpath, 0o700); err != nil { return nil, err } - rls, err := s.newLayerStore(rlpath, glpath, s.graphDriver, s.transientStore) + ilpath := "" + if s.imageStoreDir != "" { + ilpath = filepath.Join(s.imageStoreDir, driverPrefix+"layers") + } + rls, err := s.newLayerStore(rlpath, glpath, ilpath, s.graphDriver, s.transientStore) if err != nil { return nil, err } @@ -1118,8 +1161,10 @@ func (s *store) getROLayerStoresLocked() ([]roLayerStore, error) { if err := os.MkdirAll(rlpath, 0o700); err != nil { return nil, err } + for _, store := range s.graphDriver.AdditionalImageStores() { glpath := filepath.Join(store, driverPrefix+"layers") + rls, err := newROLayerStore(rlpath, glpath, s.graphDriver) if err != nil { return nil, err @@ -1400,8 +1445,7 @@ func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool { return true } -func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) { - var parentLayer *Layer +func (s *store) putLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error) { rlstore, rlstores, err := s.bothLayerStoreKinds() if err != nil { return nil, -1, err @@ -1414,6 +1458,8 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w return nil, -1, err } defer s.containerStore.stopWriting() + + var parentLayer *Layer var options LayerOptions if lOptions != nil { options = *lOptions @@ -1473,6 +1519,7 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w } layerOptions := LayerOptions{ OriginalDigest: options.OriginalDigest, + OriginalSize: options.OriginalSize, UncompressedDigest: options.UncompressedDigest, Flags: options.Flags, } @@ -1486,7 +1533,11 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w GIDMap: copyIDMap(gidMap), } } - return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff) + return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff, slo) +} + +func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) { + return s.putLayer(id, parent, names, mountLabel, writeable, lOptions, diff, nil) } func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) { @@ -1696,7 +1747,7 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlst } } layerOptions.TemplateLayer = layer.ID - mappedLayer, _, err := 
rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil) + mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil) if err != nil { return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err) } @@ -1867,7 +1918,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat options.Flags[mountLabelFlag] = mountLabel } - clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil) + clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil, nil) if err != nil { return nil, err } @@ -2540,7 +2591,7 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) if err := s.writeToAllStores(func(rlstore rwLayerStore) error { // Delete image from all available imagestores configured to be used. imageFound := false - for _, is := range append([]rwImageStore{s.imageStore}, s.rwImageStores...) { + for _, is := range s.rwImageStores { if is != s.imageStore { // This is an additional writeable image store // so we must perform lock @@ -2932,15 +2983,28 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro } func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error { + if stagingDirectory != diffOutput.Target { + return fmt.Errorf("invalid value for staging directory, it must be the same as the differ target directory") + } _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { if !rlstore.Exists(to) { return struct{}{}, ErrLayerUnknown } - return struct{}{}, rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options) + return struct{}{}, rlstore.applyDiffFromStagingDirectory(to, diffOutput, options) }) return err } +func (s *store) ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) { + slo := stagedLayerOptions{ + DiffOutput: args.DiffOutput, + DiffOptions: args.DiffOptions, + } + + layer, _, err := s.putLayer(args.ID, args.ParentLayer, args.Names, args.MountLabel, args.Writeable, args.LayerOptions, nil, &slo) + return layer, err +} + func (s *store) CleanupStagingDirectory(stagingDirectory string) error { _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { return struct{}{}, rlstore.CleanupStagingDirectory(stagingDirectory) @@ -2948,6 +3012,13 @@ func (s *store) CleanupStagingDirectory(stagingDirectory string) error { return err } +func (s *store) CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error { + _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { + return struct{}{}, rlstore.CleanupStagingDirectory(diffOutput.Target) + }) + return err +} + func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { return writeToLayerStore(s, func(rlstore rwLayerStore) (*drivers.DriverWithDifferOutput, error) { if to != "" && !rlstore.Exists(to) { @@ -3311,6 +3382,27 @@ func (s *store) ContainerByLayer(id string) (*Container, error) { return nil, ErrContainerUnknown } +func (s *store) ImageDirectory(id string) (string, error) { + foundImage := false + if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) { + if store.Exists(id) { + foundImage = true + } + middleDir 
:= s.graphDriverName + "-images" + gipath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(gipath, 0o700); err != nil { + return "", true, err + } + return gipath, true, nil + }); done { + return res, err + } + if foundImage { + return "", fmt.Errorf("locating image with ID %q (consider removing the image to resolve the issue): %w", id, os.ErrNotExist) + } + return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) +} + func (s *store) ContainerDirectory(id string) (string, error) { res, _, err := readContainerStore(s, func() (string, bool, error) { id, err := s.containerStore.Lookup(id) @@ -3328,6 +3420,28 @@ func (s *store) ContainerDirectory(id string) (string, error) { return res, err } +func (s *store) ImageRunDirectory(id string) (string, error) { + foundImage := false + if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) { + if store.Exists(id) { + foundImage = true + } + + middleDir := s.graphDriverName + "-images" + rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(rcpath, 0o700); err != nil { + return "", true, err + } + return rcpath, true, nil + }); done { + return res, err + } + if foundImage { + return "", fmt.Errorf("locating image with ID %q (consider removing the image to resolve the issue): %w", id, os.ErrNotExist) + } + return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) +} + func (s *store) ContainerRunDirectory(id string) (string, error) { res, _, err := readContainerStore(s, func() (string, bool, error) { id, err := s.containerStore.Lookup(id) diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go index 32ae830b..57120731 100644 --- a/vendor/github.com/containers/storage/userns.go +++ b/vendor/github.com/containers/storage/userns.go @@ -11,7 +11,7 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/unshare" "github.com/containers/storage/types" - libcontainerUser "github.com/opencontainers/runc/libcontainer/user" + libcontainerUser "github.com/moby/sys/user" "github.com/sirupsen/logrus" ) @@ -175,7 +175,7 @@ outer: // We need to create a temporary layer so we can mount it and lookup the // maximum IDs used. - clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil) + clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil, nil) if err != nil { return 0, err } diff --git a/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md b/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md deleted file mode 100644 index 3305db0f..00000000 --- a/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md +++ /dev/null @@ -1,10 +0,0 @@ -Serious about security -====================== - -Square recognizes the important contributions the security research community -can make. We therefore encourage reporting security issues with the code -contained in this repository. - -If you believe you have discovered a security vulnerability, please follow the -guidelines at . 
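Returning to the containers/storage API surface: the store.go hunks above deprecate ApplyDiffFromStagingDirectory and CleanupStagingDirectory in favor of ApplyStagedLayer and CleanupStagedLayer, with the staging directory now carried in diffOutput.Target. A hedged sketch of the intended call sequence (everything other than the Store methods and types is illustrative):

    package example

    import (
    	storage "github.com/containers/storage"
    	graphdriver "github.com/containers/storage/drivers"
    )

    // applyStaged stages a diff with a differ and then creates the layer from
    // the staging directory in a single call, so a failed apply also removes
    // the half-created layer. IDs and error handling are illustrative.
    func applyStaged(store storage.Store, differ graphdriver.Differ, layerID, parentID string) (*storage.Layer, error) {
    	out, err := store.ApplyDiffWithDiffer("", nil, differ) // to == "": stage only, no target layer yet
    	if err != nil {
    		return nil, err
    	}
    	layer, err := store.ApplyStagedLayer(storage.ApplyStagedLayerOptions{
    		ID:          layerID,  // mandatory
    		ParentLayer: parentID, // optional
    		DiffOutput:  out,      // out.Target replaces the old stagingDirectory argument
    		DiffOptions: &graphdriver.ApplyDiffWithDifferOpts{},
    	})
    	if err != nil {
    		_ = store.CleanupStagedLayer(out) // best-effort removal of the staging directory
    		return nil, err
    	}
    	return layer, nil
    }
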
- diff --git a/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md index 7820c2f4..7ae6cff9 100644 --- a/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md +++ b/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md @@ -1,6 +1,23 @@ +# v3.0.2 + +## Fixed + + - DecryptMulti: handle decompression error (#19) + +## Changed + + - jwe/CompactSerialize: improve performance (#67) + - Increase the default number of PBKDF2 iterations to 600k (#48) + - Return the proper algorithm for ECDSA keys (#45) + +## Added + + - Add Thumbprint support for opaque signers (#38) + # v3.0.1 -Fixed: +## Fixed + - Security issue: an attacker specifying a large "p2c" value can cause JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the diff --git a/vendor/github.com/go-jose/go-jose/v3/README.md b/vendor/github.com/go-jose/go-jose/v3/README.md index b90c7e5c..57da6570 100644 --- a/vendor/github.com/go-jose/go-jose/v3/README.md +++ b/vendor/github.com/go-jose/go-jose/v3/README.md @@ -1,15 +1,18 @@ # Go JOSE -[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) -[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) -[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) -[![build](https://travis-ci.org/go-jose/go-jose.svg?branch=master)](https://travis-ci.org/go-jose/go-jose) -[![coverage](https://coveralls.io/repos/github/go-jose/go-jose/badge.svg?branch=master)](https://coveralls.io/r/go-jose/go-jose) +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v3.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v3) +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v3/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v3/jwt) +[![license](https://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) +[![test](https://img.shields.io/github/checks-status/go-jose/go-jose/v3)](https://github.com/go-jose/go-jose/actions) Package jose aims to provide an implementation of the Javascript Object Signing and Encryption set of standards. This includes support for JSON Web Encryption, JSON Web Signature, and JSON Web Token standards. +**Help Wanted!** If you'd like to help us develop this library please reach +out to css (at) css.bio. While I'm still working on keeping this maintained, +I have limited time for in-depth development and could use some additional help. + **Disclaimer**: This library contains encryption software that is subject to the U.S. Export Administration Regulations. You may not export, re-export, transfer or download this code or any part of it in violation of any United @@ -21,13 +24,13 @@ US maintained blocked list. ## Overview The implementation follows the -[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516), -[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and -[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications. +[JSON Web Encryption](https://dx.doi.org/10.17487/RFC7516) (RFC 7516), +[JSON Web Signature](https://dx.doi.org/10.17487/RFC7515) (RFC 7515), and +[JSON Web Token](https://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications. 
Tables of supported algorithms are shown below. The library supports both the compact and JWS/JWE JSON Serialization formats, and has optional support for multiple recipients. It also comes with a small command-line utility -([`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util)) +([`jose-util`](https://pkg.go.dev/github.com/go-jose/go-jose/jose-util)) for dealing with JOSE messages in a shell. **Note**: We use a forked version of the `encoding/json` package from the Go @@ -38,29 +41,19 @@ libraries in other languages. ### Versions -[Version 2](https://gopkg.in/go-jose/go-jose.v2) -([branch](https://github.com/go-jose/go-jose/tree/v2), -[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current stable version: - - import "gopkg.in/go-jose/go-jose.v2" - [Version 3](https://github.com/go-jose/go-jose) -([branch](https://github.com/go-jose/go-jose/tree/master), -[doc](https://godoc.org/github.com/go-jose/go-jose)) is the under development/unstable version (not released yet): +([branch](https://github.com/go-jose/go-jose/tree/v3), +[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v3), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version: import "github.com/go-jose/go-jose/v3" -All new feature development takes place on the `master` branch, which we are -preparing to release as version 3 soon. Version 2 will continue to receive -critical bug and security fixes. Note that starting with version 3 we are -using Go modules for versioning instead of `gopkg.in` as before. Version 3 also will require Go version 1.13 or higher. - -Version 1 (on the `v1` branch) is frozen and not supported anymore. +The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which +are still useable but not actively developed anymore. ### Supported algorithms See below for a table of supported algorithms. Algorithm identifiers match -the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) +the names in the [JSON Web Algorithms](https://dx.doi.org/10.17487/RFC7518) standard where possible. The Godoc reference has a list of constants. Key encryption | Algorithm identifier(s) @@ -103,20 +96,20 @@ allows attaching a key id. Algorithm(s) | Corresponding types :------------------------- | ------------------------------- - RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey) - ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey) - EdDSA1 | [ed25519.PublicKey](https://godoc.org/pkg/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/pkg/crypto/ed25519#PrivateKey) + RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey) + ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey) + EdDSA1 | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey) AES, HMAC | []byte 1. 
Only available in version 2 or later of the package ## Examples -[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) -[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v3.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v3) +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v3/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v3/jwt) Examples can be found in the Godoc reference for this package. The -[`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util) +[`jose-util`](https://github.com/go-jose/go-jose/tree/v3/jose-util) subdirectory also contains a small command-line utility which might be useful as an example as well. diff --git a/vendor/github.com/go-jose/go-jose/v3/SECURITY.md b/vendor/github.com/go-jose/go-jose/v3/SECURITY.md new file mode 100644 index 00000000..2f18a75a --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy +This document explains how to contact the Let's Encrypt security team to report security vulnerabilities. + +## Supported Versions +| Version | Supported | +| ------- | ----------| +| >= v3 | ✓ | +| v2 | ✗ | +| v1 | ✗ | + +## Reporting a vulnerability + +Please see [https://letsencrypt.org/contact/#security](https://letsencrypt.org/contact/#security) for the email address to report a vulnerability. Ensure that the subject line for your report contains the word `vulnerability` and is descriptive. Your email should be acknowledged within 24 hours. If you do not receive a response within 24 hours, please follow-up again with another email. diff --git a/vendor/github.com/go-jose/go-jose/v3/asymmetric.go b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go index 78abc326..d4d4961b 100644 --- a/vendor/github.com/go-jose/go-jose/v3/asymmetric.go +++ b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go @@ -285,6 +285,9 @@ func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm switch alg { case RS256, RS384, RS512: + // TODO(https://github.com/go-jose/go-jose/issues/40): As of go1.20, the + // random parameter is legacy and ignored, and it can be nil. + // https://cs.opensource.google/go/go/+/refs/tags/go1.20:src/crypto/rsa/pkcs1v15.go;l=263;bpv=0;bpt=1 out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) case PS256, PS384, PS512: out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ diff --git a/vendor/github.com/go-jose/go-jose/v3/crypter.go b/vendor/github.com/go-jose/go-jose/v3/crypter.go index 6901137e..506d3b7b 100644 --- a/vendor/github.com/go-jose/go-jose/v3/crypter.go +++ b/vendor/github.com/go-jose/go-jose/v3/crypter.go @@ -21,7 +21,6 @@ import ( "crypto/rsa" "errors" "fmt" - "reflect" "github.com/go-jose/go-jose/v3/json" ) @@ -76,14 +75,24 @@ type recipientKeyInfo struct { type EncrypterOptions struct { Compression CompressionAlgorithm - // Optional map of additional keys to be inserted into the protected header - // of a JWS object. Some specifications which make use of JWS like to insert - // additional values here. All values must be JSON-serializable. + // Optional map of name/value pairs to be inserted into the protected + // header of a JWS object. Some specifications which make use of + // JWS require additional values here. 
+ // + // Values will be serialized by [json.Marshal] and must be valid inputs to + // that function. + // + // [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal ExtraHeaders map[HeaderKey]interface{} } // WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it -// if necessary. It returns itself and so can be used in a fluent style. +// if necessary, and returns the updated EncrypterOptions. +// +// The v parameter will be serialized by [json.Marshal] and must be a valid +// input to that function. +// +// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { if eo.ExtraHeaders == nil { eo.ExtraHeaders = map[HeaderKey]interface{}{} @@ -111,7 +120,17 @@ func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { // default of 100000 will be used for the count and a 128-bit random salt will // be generated. type Recipient struct { - Algorithm KeyAlgorithm + Algorithm KeyAlgorithm + // Key must have one of these types: + // - ed25519.PublicKey + // - *ecdsa.PublicKey + // - *rsa.PublicKey + // - *JSONWebKey + // - JSONWebKey + // - []byte (a symmetric key) + // - Any type that satisfies the OpaqueKeyEncrypter interface + // + // The type of Key must match the value of Algorithm. Key interface{} KeyID string PBES2Count int @@ -150,16 +169,17 @@ func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) switch rcpt.Algorithm { case DIRECT: // Direct encryption mode must be treated differently - if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { + keyBytes, ok := rawKey.([]byte) + if !ok { return nil, ErrUnsupportedKeyType } - if encrypter.cipher.keySize() != len(rawKey.([]byte)) { + if encrypter.cipher.keySize() != len(keyBytes) { return nil, ErrInvalidKeySize } encrypter.keyGenerator = staticKeyGenerator{ - key: rawKey.([]byte), + key: keyBytes, } - recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte)) + recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, keyBytes) recipientInfo.keyID = keyID if rcpt.KeyID != "" { recipientInfo.keyID = rcpt.KeyID @@ -168,16 +188,16 @@ func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) return encrypter, nil case ECDH_ES: // ECDH-ES (w/o key wrapping) is similar to DIRECT mode - typeOf := reflect.TypeOf(rawKey) - if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { + keyDSA, ok := rawKey.(*ecdsa.PublicKey) + if !ok { return nil, ErrUnsupportedKeyType } encrypter.keyGenerator = ecKeyGenerator{ size: encrypter.cipher.keySize(), algID: string(enc), - publicKey: rawKey.(*ecdsa.PublicKey), + publicKey: keyDSA, } - recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey)) + recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, keyDSA) recipientInfo.keyID = keyID if rcpt.KeyID != "" { recipientInfo.keyID = rcpt.KeyID @@ -270,9 +290,8 @@ func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKey recipient, err := makeJWERecipient(alg, encryptionKey.Key) recipient.keyID = encryptionKey.KeyID return recipient, err - } - if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok { - return newOpaqueKeyEncrypter(alg, encrypter) + case OpaqueKeyEncrypter: + return newOpaqueKeyEncrypter(alg, encryptionKey) } return recipientKeyInfo{}, ErrUnsupportedKeyType } @@ -300,11 +319,11 @@ func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { return newDecrypter(decryptionKey.Key) case *JSONWebKey: return 
newDecrypter(decryptionKey.Key) + case OpaqueKeyDecrypter: + return &opaqueKeyDecrypter{decrypter: decryptionKey}, nil + default: + return nil, ErrUnsupportedKeyType } - if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok { - return &opaqueKeyDecrypter{decrypter: okd}, nil - } - return nil, ErrUnsupportedKeyType } // Implementation of encrypt method producing a JWE object. @@ -403,9 +422,24 @@ func (ctx *genericEncrypter) Options() EncrypterOptions { } } -// Decrypt and validate the object and return the plaintext. Note that this -// function does not support multi-recipient, if you desire multi-recipient +// Decrypt and validate the object and return the plaintext. This +// function does not support multi-recipient. If you desire multi-recipient // decryption use DecryptMulti instead. +// +// The decryptionKey argument must contain a private or symmetric key +// and must have one of these types: +// - *ecdsa.PrivateKey +// - *rsa.PrivateKey +// - *JSONWebKey +// - JSONWebKey +// - *JSONWebKeySet +// - JSONWebKeySet +// - []byte (a symmetric key) +// - string (a symmetric key) +// - Any type that satisfies the OpaqueKeyDecrypter interface. +// +// Note that ed25519 is only available for signatures, not encryption, so is +// not an option here. func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { headers := obj.mergedHeaders(nil) @@ -462,15 +496,21 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) // The "zip" header parameter may only be present in the protected header. if comp := obj.protected.getCompression(); comp != "" { plaintext, err = decompress(comp, plaintext) + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err) + } } - return plaintext, err + return plaintext, nil } // DecryptMulti decrypts and validates the object and returns the plaintexts, // with support for multiple recipients. It returns the index of the recipient // for which the decryption was successful, the merged headers for that recipient, // and the plaintext. +// +// The decryptionKey argument must have one of the types allowed for the +// decryptionKey argument of Decrypt(). func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { globalHeaders := obj.mergedHeaders(nil) @@ -532,7 +572,10 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade // The "zip" header parameter may only be present in the protected header. if comp := obj.protected.getCompression(); comp != "" { - plaintext, _ = decompress(comp, plaintext) + plaintext, err = decompress(comp, plaintext) + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err) + } } sanitized, err := headers.sanitized() diff --git a/vendor/github.com/go-jose/go-jose/v3/doc.go b/vendor/github.com/go-jose/go-jose/v3/doc.go index 71ec1c41..0ad40ca0 100644 --- a/vendor/github.com/go-jose/go-jose/v3/doc.go +++ b/vendor/github.com/go-jose/go-jose/v3/doc.go @@ -15,13 +15,11 @@ */ /* - Package jose aims to provide an implementation of the Javascript Object Signing and Encryption set of standards. It implements encryption and signing based on the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web Token support available in a sub-package. The library supports both the compact and JWS/JWE JSON Serialization formats, and has optional support for multiple recipients. 
- */ package jose diff --git a/vendor/github.com/go-jose/go-jose/v3/encoding.go b/vendor/github.com/go-jose/go-jose/v3/encoding.go index 968a4249..62f8b8ad 100644 --- a/vendor/github.com/go-jose/go-jose/v3/encoding.go +++ b/vendor/github.com/go-jose/go-jose/v3/encoding.go @@ -189,3 +189,36 @@ func base64URLDecode(value string) ([]byte, error) { value = strings.TrimRight(value, "=") return base64.RawURLEncoding.DecodeString(value) } + +func base64EncodeLen(sl []byte) int { + return base64.RawURLEncoding.EncodedLen(len(sl)) +} + +func base64JoinWithDots(inputs ...[]byte) string { + if len(inputs) == 0 { + return "" + } + + // Count of dots. + totalCount := len(inputs) - 1 + + for _, input := range inputs { + totalCount += base64EncodeLen(input) + } + + out := make([]byte, totalCount) + startEncode := 0 + for i, input := range inputs { + base64.RawURLEncoding.Encode(out[startEncode:], input) + + if i == len(inputs)-1 { + continue + } + + startEncode += base64EncodeLen(input) + out[startEncode] = '.' + startEncode++ + } + + return string(out) +} diff --git a/vendor/github.com/go-jose/go-jose/v3/json/decode.go b/vendor/github.com/go-jose/go-jose/v3/json/decode.go index 4dbc4146..50634dd8 100644 --- a/vendor/github.com/go-jose/go-jose/v3/json/decode.go +++ b/vendor/github.com/go-jose/go-jose/v3/json/decode.go @@ -75,14 +75,13 @@ import ( // // The JSON null value unmarshals into an interface, map, pointer, or slice // by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// “not present,” unmarshaling a JSON null into any other Go type has no effect // on the value and produces no error. // // When unmarshaling quoted strings, invalid UTF-8 or // invalid UTF-16 surrogate pairs are not treated as an error. // Instead, they are replaced by the Unicode replacement // character U+FFFD. -// func Unmarshal(data []byte, v interface{}) error { // Check for well-formedness. // Avoids filling out half a data structure diff --git a/vendor/github.com/go-jose/go-jose/v3/json/encode.go b/vendor/github.com/go-jose/go-jose/v3/json/encode.go index ea0a1361..98de68ce 100644 --- a/vendor/github.com/go-jose/go-jose/v3/json/encode.go +++ b/vendor/github.com/go-jose/go-jose/v3/json/encode.go @@ -58,6 +58,7 @@ import ( // becomes a member of the object unless // - the field's tag is "-", or // - the field is empty and its tag specifies the "omitempty" option. +// // The empty values are false, 0, any // nil pointer or interface value, and any array, slice, map, or string of // length zero. The object's default key string is the struct field name @@ -65,28 +66,28 @@ import ( // the struct field's tag value is the key name, followed by an optional comma // and options. Examples: // -// // Field is ignored by this package. -// Field int `json:"-"` +// // Field is ignored by this package. +// Field int `json:"-"` // -// // Field appears in JSON as key "myName". -// Field int `json:"myName"` +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` // -// // Field appears in JSON as key "myName" and -// // the field is omitted from the object if its value is empty, -// // as defined above. -// Field int `json:"myName,omitempty"` +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. 
+// Field int `json:"myName,omitempty"` // -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` // // The "string" option signals that a field is stored as JSON inside a // JSON-encoded string. It applies only to fields of string, floating point, // integer, or boolean types. This extra level of encoding is sometimes used // when communicating with JavaScript programs: // -// Int64String int64 `json:",string"` +// Int64String int64 `json:",string"` // // The key name will be used if it's a non-empty string consisting of // only Unicode letters, digits, dollar signs, percent signs, hyphens, @@ -133,7 +134,6 @@ import ( // JSON cannot represent cyclic data structures and Marshal does not // handle them. Passing cyclic structures to Marshal will result in // an infinite recursion. -// func Marshal(v interface{}) ([]byte, error) { e := &encodeState{} err := e.marshal(v) diff --git a/vendor/github.com/go-jose/go-jose/v3/json/stream.go b/vendor/github.com/go-jose/go-jose/v3/json/stream.go index 9b2b926b..f03b171e 100644 --- a/vendor/github.com/go-jose/go-jose/v3/json/stream.go +++ b/vendor/github.com/go-jose/go-jose/v3/json/stream.go @@ -240,7 +240,6 @@ var _ Unmarshaler = (*RawMessage)(nil) // Number, for JSON numbers // string, for JSON string literals // nil, for JSON null -// type Token interface{} const ( diff --git a/vendor/github.com/go-jose/go-jose/v3/jwe.go b/vendor/github.com/go-jose/go-jose/v3/jwe.go index bce30450..4267ac75 100644 --- a/vendor/github.com/go-jose/go-jose/v3/jwe.go +++ b/vendor/github.com/go-jose/go-jose/v3/jwe.go @@ -252,13 +252,13 @@ func (obj JSONWebEncryption) CompactSerialize() (string, error) { serializedProtected := mustSerializeJSON(obj.protected) - return fmt.Sprintf( - "%s.%s.%s.%s.%s", - base64.RawURLEncoding.EncodeToString(serializedProtected), - base64.RawURLEncoding.EncodeToString(obj.recipients[0].encryptedKey), - base64.RawURLEncoding.EncodeToString(obj.iv), - base64.RawURLEncoding.EncodeToString(obj.ciphertext), - base64.RawURLEncoding.EncodeToString(obj.tag)), nil + return base64JoinWithDots( + serializedProtected, + obj.recipients[0].encryptedKey, + obj.iv, + obj.ciphertext, + obj.tag, + ), nil } // FullSerialize serializes an object using the full JSON serialization format. diff --git a/vendor/github.com/go-jose/go-jose/v3/jwk.go b/vendor/github.com/go-jose/go-jose/v3/jwk.go index 78ff5aca..e4021959 100644 --- a/vendor/github.com/go-jose/go-jose/v3/jwk.go +++ b/vendor/github.com/go-jose/go-jose/v3/jwk.go @@ -67,9 +67,21 @@ type rawJSONWebKey struct { X5tSHA256 string `json:"x5t#S256,omitempty"` } -// JSONWebKey represents a public or private key in JWK format. +// JSONWebKey represents a public or private key in JWK format. It can be +// marshaled into JSON and unmarshaled from JSON. type JSONWebKey struct { - // Cryptographic key, can be a symmetric or asymmetric key. + // Key is the Go in-memory representation of this key. 
It must have one + // of these types: + // - ed25519.PublicKey + // - ed25519.PrivateKey + // - *ecdsa.PublicKey + // - *ecdsa.PrivateKey + // - *rsa.PublicKey + // - *rsa.PrivateKey + // - []byte (a symmetric key) + // + // When marshaling this JSONWebKey into JSON, the "kty" header parameter + // will be automatically set based on the type of this field. Key interface{} // Key identifier, parsed from `kid` header. KeyID string @@ -389,6 +401,8 @@ func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) { input, err = rsaThumbprintInput(key.N, key.E) case ed25519.PrivateKey: input, err = edThumbprintInput(ed25519.PublicKey(key[32:])) + case OpaqueSigner: + return key.Public().Thumbprint(hash) default: return nil, fmt.Errorf("go-jose/go-jose: unknown key type '%s'", reflect.TypeOf(key)) } diff --git a/vendor/github.com/go-jose/go-jose/v3/jws.go b/vendor/github.com/go-jose/go-jose/v3/jws.go index 865f16ad..e37007db 100644 --- a/vendor/github.com/go-jose/go-jose/v3/jws.go +++ b/vendor/github.com/go-jose/go-jose/v3/jws.go @@ -314,15 +314,18 @@ func (obj JSONWebSignature) compactSerialize(detached bool) (string, error) { return "", ErrNotSupported } - serializedProtected := base64.RawURLEncoding.EncodeToString(mustSerializeJSON(obj.Signatures[0].protected)) - payload := "" - signature := base64.RawURLEncoding.EncodeToString(obj.Signatures[0].Signature) + serializedProtected := mustSerializeJSON(obj.Signatures[0].protected) + var payload []byte if !detached { - payload = base64.RawURLEncoding.EncodeToString(obj.payload) + payload = obj.payload } - return fmt.Sprintf("%s.%s.%s", serializedProtected, payload, signature), nil + return base64JoinWithDots( + serializedProtected, + payload, + obj.Signatures[0].Signature, + ), nil } // CompactSerialize serializes an object using the compact serialization format. diff --git a/vendor/github.com/go-jose/go-jose/v3/opaque.go b/vendor/github.com/go-jose/go-jose/v3/opaque.go index fc3e8d2e..68db085e 100644 --- a/vendor/github.com/go-jose/go-jose/v3/opaque.go +++ b/vendor/github.com/go-jose/go-jose/v3/opaque.go @@ -121,7 +121,7 @@ func (oke *opaqueKeyEncrypter) encryptKey(cek []byte, alg KeyAlgorithm) (recipie return oke.encrypter.encryptKey(cek, alg) } -//OpaqueKeyDecrypter is an interface that supports decrypting keys with an opaque key. +// OpaqueKeyDecrypter is an interface that supports decrypting keys with an opaque key. type OpaqueKeyDecrypter interface { DecryptKey(encryptedKey []byte, header Header) ([]byte, error) } diff --git a/vendor/github.com/go-jose/go-jose/v3/shared.go b/vendor/github.com/go-jose/go-jose/v3/shared.go index fc2505e0..489a04e3 100644 --- a/vendor/github.com/go-jose/go-jose/v3/shared.go +++ b/vendor/github.com/go-jose/go-jose/v3/shared.go @@ -183,8 +183,13 @@ type Header struct { // Unverified certificate chain parsed from x5c header. certificates []*x509.Certificate - // Any headers not recognised above get unmarshalled - // from JSON in a generic manner and placed in this map. + // At parse time, each header parameter with a name other than "kid", + // "jwk", "alg", "nonce", or "x5c" will have its value passed to + // [json.Unmarshal] to unmarshal it into an interface value. + // The resulting value will be stored in this map, with the header + // parameter name as the key. 
+ // + // [json.Unmarshal]: https://pkg.go.dev/encoding/json#Unmarshal ExtraHeaders map[HeaderKey]interface{} } diff --git a/vendor/github.com/go-jose/go-jose/v3/signing.go b/vendor/github.com/go-jose/go-jose/v3/signing.go index 81d55f58..52f3d856 100644 --- a/vendor/github.com/go-jose/go-jose/v3/signing.go +++ b/vendor/github.com/go-jose/go-jose/v3/signing.go @@ -40,6 +40,15 @@ type Signer interface { } // SigningKey represents an algorithm/key used to sign a message. +// +// Key must have one of these types: +// - ed25519.PrivateKey +// - *ecdsa.PrivateKey +// - *rsa.PrivateKey +// - *JSONWebKey +// - JSONWebKey +// - []byte (an HMAC key) +// - Any type that satisfies the OpaqueSigner interface type SigningKey struct { Algorithm SignatureAlgorithm Key interface{} @@ -52,12 +61,22 @@ type SignerOptions struct { // Optional map of additional keys to be inserted into the protected header // of a JWS object. Some specifications which make use of JWS like to insert - // additional values here. All values must be JSON-serializable. + // additional values here. + // + // Values will be serialized by [json.Marshal] and must be valid inputs to + // that function. + // + // [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal ExtraHeaders map[HeaderKey]interface{} } // WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it -// if necessary. It returns itself and so can be used in a fluent style. +// if necessary, and returns the updated SignerOptions. +// +// The v argument will be serialized by [json.Marshal] and must be a valid +// input to that function. +// +// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal func (so *SignerOptions) WithHeader(k HeaderKey, v interface{}) *SignerOptions { if so.ExtraHeaders == nil { so.ExtraHeaders = map[HeaderKey]interface{}{} @@ -173,11 +192,11 @@ func newVerifier(verificationKey interface{}) (payloadVerifier, error) { return newVerifier(verificationKey.Key) case *JSONWebKey: return newVerifier(verificationKey.Key) + case OpaqueVerifier: + return &opaqueVerifier{verifier: verificationKey}, nil + default: + return nil, ErrUnsupportedKeyType } - if ov, ok := verificationKey.(OpaqueVerifier); ok { - return &opaqueVerifier{verifier: ov}, nil - } - return nil, ErrUnsupportedKeyType } func (ctx *genericSigner) addRecipient(alg SignatureAlgorithm, signingKey interface{}) error { @@ -204,11 +223,11 @@ func makeJWSRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipient return newJWKSigner(alg, signingKey) case *JSONWebKey: return newJWKSigner(alg, *signingKey) + case OpaqueSigner: + return newOpaqueSigner(alg, signingKey) + default: + return recipientSigInfo{}, ErrUnsupportedKeyType } - if signer, ok := signingKey.(OpaqueSigner); ok { - return newOpaqueSigner(alg, signer) - } - return recipientSigInfo{}, ErrUnsupportedKeyType } func newJWKSigner(alg SignatureAlgorithm, signingKey JSONWebKey) (recipientSigInfo, error) { @@ -321,12 +340,21 @@ func (ctx *genericSigner) Options() SignerOptions { } // Verify validates the signature on the object and returns the payload. -// This function does not support multi-signature, if you desire multi-sig +// This function does not support multi-signature. If you desire multi-signature // verification use VerifyMulti instead. // // Be careful when verifying signatures based on embedded JWKs inside the // payload header. You cannot assume that the key received in a payload is // trusted. 
+// +// The verificationKey argument must have one of these types: +// - ed25519.PublicKey +// - *ecdsa.PublicKey +// - *rsa.PublicKey +// - *JSONWebKey +// - JSONWebKey +// - []byte (an HMAC key) +// - Any type that implements the OpaqueVerifier interface. func (obj JSONWebSignature) Verify(verificationKey interface{}) ([]byte, error) { err := obj.DetachedVerify(obj.payload, verificationKey) if err != nil { @@ -346,6 +374,9 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte { // most cases, you will probably want to use Verify instead. DetachedVerify // is only useful if you have a payload and signature that are separated from // each other. +// +// The verificationKey argument must have one of the types allowed for the +// verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error { key := tryJWKS(verificationKey, obj.headers()...) verifier, err := newVerifier(key) @@ -388,6 +419,9 @@ func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey inter // returns the index of the signature that was verified, along with the signature // object and the payload. We return the signature and index to guarantee that // callers are getting the verified value. +// +// The verificationKey argument must have one of the types allowed for the +// verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) { idx, sig, err := obj.DetachedVerifyMulti(obj.payload, verificationKey) if err != nil { @@ -405,6 +439,9 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signa // DetachedVerifyMulti is only useful if you have a payload and signature that are // separated from each other, and the signature can have multiple signers at the // same time. +// +// The verificationKey argument must have one of the types allowed for the +// verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) { key := tryJWKS(verificationKey, obj.headers()...) 
verifier, err := newVerifier(key) diff --git a/vendor/github.com/go-jose/go-jose/v3/symmetric.go b/vendor/github.com/go-jose/go-jose/v3/symmetric.go index 1ffd2708..10d8e19f 100644 --- a/vendor/github.com/go-jose/go-jose/v3/symmetric.go +++ b/vendor/github.com/go-jose/go-jose/v3/symmetric.go @@ -40,12 +40,17 @@ var RandReader = rand.Reader const ( // RFC7518 recommends a minimum of 1,000 iterations: - // https://tools.ietf.org/html/rfc7518#section-4.8.1.2 + // - https://tools.ietf.org/html/rfc7518#section-4.8.1.2 + // // NIST recommends a minimum of 10,000: - // https://pages.nist.gov/800-63-3/sp800-63b.html - // 1Password uses 100,000: - // https://support.1password.com/pbkdf2/ - defaultP2C = 100000 + // - https://pages.nist.gov/800-63-3/sp800-63b.html + // + // 1Password increased in 2023 from 100,000 to 650,000: + // - https://support.1password.com/pbkdf2/ + // + // OWASP recommended 600,000 in Dec 2022: + // - https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2 + defaultP2C = 600000 // Default salt size: 128 bits defaultP2SSize = 16 ) diff --git a/vendor/github.com/go-openapi/strfmt/format.go b/vendor/github.com/go-openapi/strfmt/format.go index ce99ce52..888e107c 100644 --- a/vendor/github.com/go-openapi/strfmt/format.go +++ b/vendor/github.com/go-openapi/strfmt/format.go @@ -16,6 +16,7 @@ package strfmt import ( "encoding" + stderrors "errors" "fmt" "reflect" "strings" @@ -117,7 +118,7 @@ func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { case "datetime": input := data if len(input) == 0 { - return nil, fmt.Errorf("empty string is an invalid datetime format") + return nil, stderrors.New("empty string is an invalid datetime format") } return ParseDateTime(input) case "duration": diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md index c9fb829d..7ec5ac7e 100644 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + ## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b404f4be..dc60082d 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -17,6 +17,12 @@ var ( NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) Nil UUID // empty UUID, all zeros + + // The Max UUID is a special form of UUID that is specified to have all 128 bits set to 1.
+ Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } ) // NewHash returns a new UUID derived from the hash of space concatenated with diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go index ba9dd5eb..3167b643 100644 --- a/vendor/github.com/google/uuid/version7.go +++ b/vendor/github.com/google/uuid/version7.go @@ -44,7 +44,7 @@ func NewV7FromReader(r io.Reader) (UUID, error) { // makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) // uuid[8] already has the right version number (Variant is 10) -// see function NewV7 and NewV7FromReader +// see function NewV7 and NewV7FromReader func makeV7(uuid []byte) { /* 0 1 2 3 @@ -52,7 +52,7 @@ func makeV7(uuid []byte) { +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | unix_ts_ms | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | unix_ts_ms | ver | rand_a | + | unix_ts_ms | ver | rand_a (12 bit seq) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |var| rand_b | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -61,7 +61,7 @@ func makeV7(uuid []byte) { */ _ = uuid[15] // bounds check - t := timeNow().UnixMilli() + t, s := getV7Time() uuid[0] = byte(t >> 40) uuid[1] = byte(t >> 32) @@ -70,6 +70,35 @@ func makeV7(uuid []byte) { uuid[4] = byte(t >> 8) uuid[5] = byte(t) - uuid[6] = 0x70 | (uuid[6] & 0x0F) - // uuid[8] has already has right version + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guaranteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time. +func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq } diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index ac281fb7..1f72cdde 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,10 +16,14 @@ This package provides various compression algorithms.
# changelog +* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) + * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 + * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 + * Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) - * flate: Fix reset with dictionary on custom window encodes by @klauspost in https://github.com/klauspost/compress/pull/912 - * zstd: Add Frame header encoding and stripping by @klauspost in https://github.com/klauspost/compress/pull/908 - * zstd: Limit better/best default window to 8MB by @klauspost in https://github.com/klauspost/compress/pull/913 + * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912 + * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 + * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 diff --git a/vendor/github.com/opencontainers/runc/LICENSE b/vendor/github.com/moby/sys/user/LICENSE similarity index 94% rename from vendor/github.com/opencontainers/runc/LICENSE rename to vendor/github.com/moby/sys/user/LICENSE index 27448585..d6456956 100644 --- a/vendor/github.com/opencontainers/runc/LICENSE +++ b/vendor/github.com/moby/sys/user/LICENSE @@ -176,7 +176,18 @@ END OF TERMS AND CONDITIONS - Copyright 2014 Docker, Inc. + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
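The Apache-2.0 license text above travels with the code: the renames that follow relocate runc's libcontainer/user package wholesale to moby/sys/user (100% similarity, so only the module path changes), matching the aliased-import swap in the userns.go hunk earlier in this patch. A minimal consumer-side sketch of that swap; since the files are unchanged, the lookup helpers keep their existing signatures:

    package main

    import (
    	"fmt"

    	// Previously: libcontainerUser "github.com/opencontainers/runc/libcontainer/user"
    	libcontainerUser "github.com/moby/sys/user"
    )

    func main() {
    	// LookupUid scans the local passwd database for uid 0; the helper
    	// keeps its pre-move signature, returning (User, error).
    	u, err := libcontainerUser.LookupUid(0)
    	if err != nil {
    		fmt.Println("lookup failed:", err)
    		return
    	}
    	fmt.Printf("uid 0 is %q (home %s)\n", u.Name, u.Home)
    }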
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/moby/sys/user/lookup_unix.go similarity index 100% rename from vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go rename to vendor/github.com/moby/sys/user/lookup_unix.go diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/moby/sys/user/user.go similarity index 100% rename from vendor/github.com/opencontainers/runc/libcontainer/user/user.go rename to vendor/github.com/moby/sys/user/user.go diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go b/vendor/github.com/moby/sys/user/user_fuzzer.go similarity index 100% rename from vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go rename to vendor/github.com/moby/sys/user/user_fuzzer.go diff --git a/vendor/github.com/opencontainers/runc/NOTICE b/vendor/github.com/opencontainers/runc/NOTICE deleted file mode 100644 index 5c97abce..00000000 --- a/vendor/github.com/opencontainers/runc/NOTICE +++ /dev/null @@ -1,17 +0,0 @@ -runc - -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (http://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see http://www.bis.doc.gov - -See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go index 4e7717d5..d1236ba7 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -187,6 +187,10 @@ type Hook struct { type Hooks struct { // Prestart is Deprecated. Prestart is a list of hooks to be run before the container process is executed. // It is called in the Runtime Namespace + // + // Deprecated: use [Hooks.CreateRuntime], [Hooks.CreateContainer], and + // [Hooks.StartContainer] instead, which allow more granular hook control + // during the create and start phase. Prestart []Hook `json:"prestart,omitempty"` // CreateRuntime is a list of hooks to be run after the container has been created but before pivot_root or any equivalent operation has been called // It is called in the Runtime Namespace @@ -371,6 +375,12 @@ type LinuxMemory struct { // Total memory limit (memory + swap). Swap *int64 `json:"swap,omitempty"` // Kernel memory limit (in bytes). + // + // Deprecated: kernel-memory limits are not supported in cgroups v2, and + // were obsoleted in [kernel v5.4]. This field should no longer be used, + // as it may be ignored by runtimes. 
+ // + // [kernel v5.4]: https://github.com/torvalds/linux/commit/0158115f702b0ba208ab0 Kernel *int64 `json:"kernel,omitempty"` // Kernel memory limit for tcp (in bytes) KernelTCP *int64 `json:"kernelTCP,omitempty"` diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go index b3fca349..503971e0 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go @@ -6,7 +6,7 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 1 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 1 + VersionMinor = 2 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 0 diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauth/interactive.go b/vendor/github.com/sigstore/sigstore/pkg/oauth/interactive.go index e745911a..d9ee1234 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/oauth/interactive.go +++ b/vendor/github.com/sigstore/sigstore/pkg/oauth/interactive.go @@ -15,8 +15,120 @@ // Package oauth implements OAuth/OIDC support for device and token flows package oauth +import ( + "bytes" + "fmt" + "text/template" +) + +// GetInteractiveSuccessHTML is the page displayed upon success when using a web browser during an interactive Oauth token flow. +// The page will close automatically if autoclose is true with the timeout specified. +func GetInteractiveSuccessHTML(autoclose bool, timeout int) (string, error) { + const successTemplate = ` + + + Sigstore Authentication + + + + +
+
+ + + + + +
+
+
+ sigstore + authentication successful! +
+ {{ if .Autoclose -}} + + + {{- else -}} +
+ You may now close this page. +
+ {{- end }} +
+ +
+ + + {{ if .Autoclose -}} + + {{- end }} + + +` + // Parse the template + tmpl, err := template.New("success").Parse(successTemplate) + if err != nil { + return "", fmt.Errorf("error parsing success template: %w", err) + } + // Pass autoclose and timeout to the template + data := struct { + Autoclose bool + Timeout int + }{ + autoclose, + timeout, + } + var htmlPage bytes.Buffer + if err := tmpl.Execute(&htmlPage, data); err != nil { + return "", fmt.Errorf("error executing template: %w", err) + } + return htmlPage.String(), nil +} + const ( - // InteractiveSuccessHTML is the page displayed upon success when using a web browser during an interactive Oauth token flow. + // InteractiveSuccessHTML (deprecated) is the page displayed upon success when using a web browser during an interactive Oauth token flow. InteractiveSuccessHTML = ` diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/client_credentials.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/client_credentials.go new file mode 100644 index 00000000..8a1a4e0a --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/client_credentials.go @@ -0,0 +1,156 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oauthflow + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "github.com/coreos/go-oidc/v3/oidc" + "golang.org/x/oauth2" +) + +// CodeURL fetches the client credentials token authorization endpoint URL from the provider's well-known configuration endpoint +func (d *DefaultFlowClientCredentials) CodeURL() (string, error) { + if d.codeURL != "" { + return d.codeURL, nil + } + + wellKnown := strings.TrimSuffix(d.Issuer, "/") + "/.well-known/openid-configuration" + /* #nosec */ + httpClient := &http.Client{ + Timeout: 3 * time.Second, + } + resp, err := httpClient.Get(wellKnown) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("unable to read response body: %w", err) + } + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("%s: %s", resp.Status, body) + } + + providerConfig := struct { + Issuer string `json:"issuer"` + TokenEndpoint string `json:"token_endpoint"` + }{} + if err = json.Unmarshal(body, &providerConfig); err != nil { + return "", fmt.Errorf("oidc: failed to decode provider discovery object: %w", err) + } + + if d.Issuer != providerConfig.Issuer { + return "", fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", d.Issuer, providerConfig.Issuer) + } + + if providerConfig.TokenEndpoint == "" { + return "", fmt.Errorf("oidc: client credentials token authorization endpoint not returned by provider") + } + + d.codeURL = providerConfig.TokenEndpoint + return d.codeURL, nil +} + +// DefaultFlowClientCredentials fetches an OIDC Identity token using the Client Credentials Grant flow as specified in RFC8628 +type DefaultFlowClientCredentials 
struct { + Issuer string + codeURL string +} + +// NewClientCredentialsFlow creates a new DefaultFlowClientCredentials that retrieves an OIDC Identity Token using a Client Credentials Grant +func NewClientCredentialsFlow(issuer string) *DefaultFlowClientCredentials { + return &DefaultFlowClientCredentials{ + Issuer: issuer, + } +} + +func (d *DefaultFlowClientCredentials) clientCredentialsFlow(_ *oidc.Provider, clientID, clientSecret, redirectURL string) (string, error) { + data := url.Values{ + "client_id": []string{clientID}, + "client_secret": []string{clientSecret}, + "scope": []string{"openid email"}, + "grant_type": []string{"client_credentials"}, + } + if redirectURL != "" { + // If a redirect uri is provided then use it + data["redirect_uri"] = []string{redirectURL} + } + + codeURL, err := d.CodeURL() + if err != nil { + return "", err + } + /* #nosec */ + resp, err := http.PostForm(codeURL, data) + if err != nil { + return "", err + } + defer resp.Body.Close() + + b, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("%s: %s", resp.Status, b) + } + + tr := tokenResp{} + if err := json.Unmarshal(b, &tr); err != nil { + return "", err + } + + if tr.IDToken != "" { + fmt.Println("Token received!") + return tr.IDToken, nil + } + + return "", fmt.Errorf("unexpected error in client flow: %s", tr.Error) +} + +// GetIDToken gets an OIDC ID Token from the specified provider using the Client Credentials Grant flow +func (d *DefaultFlowClientCredentials) GetIDToken(p *oidc.Provider, cfg oauth2.Config) (*OIDCIDToken, error) { + idToken, err := d.clientCredentialsFlow(p, cfg.ClientID, cfg.ClientSecret, cfg.RedirectURL) + if err != nil { + return nil, err + } + verifier := p.Verifier(&oidc.Config{ClientID: cfg.ClientID}) + parsedIDToken, err := verifier.Verify(context.Background(), idToken) + if err != nil { + return nil, err + } + + subj, err := SubjectFromToken(parsedIDToken) + if err != nil { + return nil, err + } + + return &OIDCIDToken{ + RawString: idToken, + Subject: subj, + }, nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go index 38df9700..28abcac5 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go +++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go @@ -19,6 +19,7 @@ import ( "context" "encoding/json" "errors" + "log" "github.com/coreos/go-oidc/v3/oidc" "github.com/go-jose/go-jose/v3" @@ -46,6 +47,17 @@ type OIDCIDToken struct { Subject string // Subject is the extracted subject from the raw token } +// init +func init() { + // set the default HTML page for the DefaultIDTokenGetter + htmlPage, err := soauth.GetInteractiveSuccessHTML(false, 10) + if err != nil { + log.Print("failed to get interactive success html, defaulting to original static page") + } else { + DefaultIDTokenGetter.HTMLPage = htmlPage + } +} + // ConnectorIDOpt requests the value of prov as a the connector_id (either on URL or in form body) on the initial request; // this is used by Dex func ConnectorIDOpt(prov string) oauth2.AuthCodeOption { diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go b/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go new file mode 100644 index 00000000..d1660796 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go @@ -0,0 +1,211 @@ +// +// Copyright 2024 The Sigstore Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "crypto" + "crypto/ed25519" + "crypto/rand" + "errors" + "fmt" + "io" + + "github.com/sigstore/sigstore/pkg/signature/options" +) + +var ed25519phSupportedHashFuncs = []crypto.Hash{ + crypto.SHA512, +} + +// ED25519phSigner is a signature.Signer that uses the Ed25519 public-key signature system with pre-hashing +type ED25519phSigner struct { + priv ed25519.PrivateKey +} + +// LoadED25519phSigner calculates signatures using the specified private key. +func LoadED25519phSigner(priv ed25519.PrivateKey) (*ED25519phSigner, error) { + if priv == nil { + return nil, errors.New("invalid ED25519 private key specified") + } + + return &ED25519phSigner{ + priv: priv, + }, nil +} + +// ToED25519SignerVerifier creates a ED25519SignerVerifier from a ED25519phSignerVerifier +// +// Clients that use ED25519phSignerVerifier should use this method to get a +// SignerVerifier that uses the same ED25519 private key, but with the Pure +// Ed25519 algorithm. This might be necessary to interact with Fulcio, which +// only supports the Pure Ed25519 algorithm. +func (e ED25519phSignerVerifier) ToED25519SignerVerifier() (*ED25519SignerVerifier, error) { + return LoadED25519SignerVerifier(e.priv) +} + +// SignMessage signs the provided message. If the message is provided, +// this method will compute the digest according to the hash function specified +// when the ED25519phSigner was created. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithDigest() +// +// All other options are ignored if specified. +func (e ED25519phSigner) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) { + digest, _, err := ComputeDigestForSigning(message, crypto.SHA512, ed25519phSupportedHashFuncs, opts...) + if err != nil { + return nil, err + } + + return e.priv.Sign(nil, digest, crypto.SHA512) +} + +// Public returns the public key that can be used to verify signatures created by +// this signer. +func (e ED25519phSigner) Public() crypto.PublicKey { + if e.priv == nil { + return nil + } + + return e.priv.Public() +} + +// PublicKey returns the public key that can be used to verify signatures created by +// this signer. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e ED25519phSigner) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.Public(), nil +} + +// Sign computes the signature for the specified message; the first and third arguments to this +// function are ignored as they are not used by the ED25519ph algorithm. 
+func (e ED25519phSigner) Sign(_ io.Reader, digest []byte, _ crypto.SignerOpts) ([]byte, error) { + return e.SignMessage(nil, options.WithDigest(digest)) +} + +// ED25519phVerifier is a signature.Verifier that uses the Ed25519 public-key signature system +type ED25519phVerifier struct { + publicKey ed25519.PublicKey +} + +// LoadED25519phVerifier returns a Verifier that verifies signatures using the +// specified ED25519 public key. +func LoadED25519phVerifier(pub ed25519.PublicKey) (*ED25519phVerifier, error) { + if pub == nil { + return nil, errors.New("invalid ED25519 public key specified") + } + + return &ED25519phVerifier{ + publicKey: pub, + }, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e *ED25519phVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.publicKey, nil +} + +// VerifySignature verifies the signature for the given message. Unless provided +// in an option, the digest of the message will be computed using the hash function specified +// when the ED25519phVerifier was created. +// +// This function returns nil if the verification succeeded, and an error message otherwise. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithDigest() +// +// All other options are ignored if specified. +func (e *ED25519phVerifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error { + if signature == nil { + return errors.New("nil signature passed to VerifySignature") + } + + digest, _, err := ComputeDigestForVerifying(message, crypto.SHA512, ed25519phSupportedHashFuncs, opts...) + if err != nil { + return err + } + + sigBytes, err := io.ReadAll(signature) + if err != nil { + return fmt.Errorf("reading signature: %w", err) + } + + if err := ed25519.VerifyWithOptions(e.publicKey, digest, sigBytes, &ed25519.Options{Hash: crypto.SHA512}); err != nil { + return fmt.Errorf("failed to verify signature: %w", err) + } + return nil +} + +// ED25519phSignerVerifier is a signature.SignerVerifier that uses the Ed25519 public-key signature system +type ED25519phSignerVerifier struct { + *ED25519phSigner + *ED25519phVerifier +} + +// LoadED25519phSignerVerifier creates a combined signer and verifier. This is +// a convenience object that simply wraps an instance of ED25519phSigner and ED25519phVerifier. +func LoadED25519phSignerVerifier(priv ed25519.PrivateKey) (*ED25519phSignerVerifier, error) { + signer, err := LoadED25519phSigner(priv) + if err != nil { + return nil, fmt.Errorf("initializing signer: %w", err) + } + pub, ok := priv.Public().(ed25519.PublicKey) + if !ok { + return nil, fmt.Errorf("given key is not ed25519.PublicKey") + } + verifier, err := LoadED25519phVerifier(pub) + if err != nil { + return nil, fmt.Errorf("initializing verifier: %w", err) + } + + return &ED25519phSignerVerifier{ + ED25519phSigner: signer, + ED25519phVerifier: verifier, + }, nil +} + +// NewDefaultED25519phSignerVerifier creates a combined signer and verifier using ED25519. +// This creates a new ED25519 key using crypto/rand as an entropy source. +func NewDefaultED25519phSignerVerifier() (*ED25519phSignerVerifier, ed25519.PrivateKey, error) { + return NewED25519phSignerVerifier(rand.Reader) +} + +// NewED25519phSignerVerifier creates a combined signer and verifier using ED25519. +// This creates a new ED25519 key using the specified entropy source. 
+func NewED25519phSignerVerifier(rand io.Reader) (*ED25519phSignerVerifier, ed25519.PrivateKey, error) { + _, priv, err := ed25519.GenerateKey(rand) + if err != nil { + return nil, nil, err + } + + sv, err := LoadED25519phSignerVerifier(priv) + if err != nil { + return nil, nil, err + } + + return sv, priv, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e ED25519phSignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.publicKey, nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options.go index 0be699f7..e17e768c 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/options.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options.go @@ -18,6 +18,7 @@ package signature import ( "context" "crypto" + "crypto/rsa" "io" "github.com/sigstore/sigstore/pkg/signature/options" @@ -55,3 +56,10 @@ type VerifyOption interface { RPCOption MessageOption } + +// LoadOption specifies options to be used when creating a Signer/Verifier +type LoadOption interface { + ApplyHash(*crypto.Hash) + ApplyED25519ph(*bool) + ApplyRSAPSS(**rsa.PSSOptions) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go new file mode 100644 index 00000000..e5f3f011 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go @@ -0,0 +1,76 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
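A minimal, hypothetical round-trip sketch of the ED25519ph signer/verifier added above; it assumes the sigstore/sigstore module is importable, and the message bytes are illustrative only:

// Illustrative sketch, not part of this change.
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	// Generate a fresh Ed25519 key and wrap it in the pre-hashed (ph) variant.
	sv, _, err := signature.NewDefaultED25519phSignerVerifier()
	if err != nil {
		log.Fatal(err)
	}

	msg := []byte("hello, Ed25519ph")

	// SignMessage hashes the message with SHA-512 before signing.
	sig, err := sv.SignMessage(bytes.NewReader(msg))
	if err != nil {
		log.Fatal(err)
	}

	// VerifySignature recomputes the SHA-512 digest and checks the signature.
	if err := sv.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg)); err != nil {
		log.Fatal(err)
	}
	fmt.Println("signature verified")
}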
+
+package options
+
+import (
+	"crypto"
+	"crypto/rsa"
+)
+
+// RequestHash implements the functional option pattern for setting a Hash
+// function when loading a signer or verifier
+type RequestHash struct {
+	NoOpOptionImpl
+	hashFunc crypto.Hash
+}
+
+// ApplyHash sets the hash as requested by the functional option
+func (r RequestHash) ApplyHash(hash *crypto.Hash) {
+	*hash = r.hashFunc
+}
+
+// WithHash specifies that the given hash function should be used when loading a signer or verifier
+func WithHash(hash crypto.Hash) RequestHash {
+	return RequestHash{hashFunc: hash}
+}
+
+// RequestED25519ph implements the functional option pattern for specifying that
+// ED25519ph (pre-hashed) should be used when loading a signer or verifier and an
+// ED25519 key is detected
+type RequestED25519ph struct {
+	NoOpOptionImpl
+	useED25519ph bool
+}
+
+// ApplyED25519ph sets the ED25519ph flag as requested by the functional option
+func (r RequestED25519ph) ApplyED25519ph(useED25519ph *bool) {
+	*useED25519ph = r.useED25519ph
+}
+
+// WithED25519ph specifies that the ED25519ph algorithm should be used when an ED25519 key is used
+func WithED25519ph() RequestED25519ph {
+	return RequestED25519ph{useED25519ph: true}
+}
+
+// RequestPSSOptions implements the functional option pattern for specifying RSA
+// PSS should be used when loading a signer or verifier and an RSA key is
+// detected
+type RequestPSSOptions struct {
+	NoOpOptionImpl
+	opts *rsa.PSSOptions
+}
+
+// ApplyRSAPSS sets the RSAPSS options as requested by the functional option
+func (r RequestPSSOptions) ApplyRSAPSS(opts **rsa.PSSOptions) {
+	*opts = r.opts
+}
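A hypothetical sketch of how these load options are consumed: LoadSignerVerifierWithOpts (added later in this patch) folds each LoadOption into a set of defaults before picking an implementation. The key generation and option values here are illustrative only:

// Illustrative sketch, not part of this change.
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
	"log"

	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/options"
)

func main() {
	_, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// WithED25519ph selects the pre-hashed Ed25519 variant; for RSA keys,
	// options.WithHash and options.WithRSAPSS would steer the choice instead.
	sv, err := signature.LoadSignerVerifierWithOpts(priv, options.WithED25519ph())
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("loaded %T\n", sv) // *signature.ED25519phSignerVerifier
}

+
+// WithRSAPSS specifies that the RSAPSS algorithm should be used when an RSA key is used.
+// Note that the rsa.PSSOptions value contains a hash algorithm, which will override
+// the hash function specified with WithHash.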
+func WithRSAPSS(opts *rsa.PSSOptions) RequestPSSOptions { + return RequestPSSOptions{opts: opts} +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go index c7f1ccb9..0c0e5185 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go @@ -18,6 +18,7 @@ package options import ( "context" "crypto" + "crypto/rsa" "io" ) @@ -47,3 +48,12 @@ func (NoOpOptionImpl) ApplyKeyVersion(_ *string) {} // ApplyKeyVersionUsed is a no-op required to fully implement the requisite interfaces func (NoOpOptionImpl) ApplyKeyVersionUsed(_ **string) {} + +// ApplyHash is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyHash(_ *crypto.Hash) {} + +// ApplyED25519ph is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyED25519ph(_ *bool) {} + +// ApplyRSAPSS is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyRSAPSS(_ **rsa.PSSOptions) {} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go index 3bd3823c..e26def9f 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go @@ -30,6 +30,7 @@ import ( _ "crypto/sha512" "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature/options" // these ensure we have the implementations loaded _ "golang.org/x/crypto/sha3" @@ -59,12 +60,33 @@ func (s SignerOpts) HashFunc() crypto.Hash { // If privateKey is an RSA key, a RSAPKCS1v15Signer will be returned. If a // RSAPSSSigner is desired instead, use the LoadRSAPSSSigner() method directly. func LoadSigner(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (Signer, error) { + return LoadSignerWithOpts(privateKey, options.WithHash(hashFunc)) +} + +// LoadSignerWithOpts returns a signature.Signer based on the algorithm of the private key +// provided. +func LoadSignerWithOpts(privateKey crypto.PrivateKey, opts ...LoadOption) (Signer, error) { + var rsaPSSOptions *rsa.PSSOptions + var useED25519ph bool + hashFunc := crypto.SHA256 + for _, o := range opts { + o.ApplyED25519ph(&useED25519ph) + o.ApplyHash(&hashFunc) + o.ApplyRSAPSS(&rsaPSSOptions) + } + switch pk := privateKey.(type) { case *rsa.PrivateKey: + if rsaPSSOptions != nil { + return LoadRSAPSSSigner(pk, hashFunc, rsaPSSOptions) + } return LoadRSAPKCS1v15Signer(pk, hashFunc) case *ecdsa.PrivateKey: return LoadECDSASigner(pk, hashFunc) case ed25519.PrivateKey: + if useED25519ph { + return LoadED25519phSigner(pk) + } return LoadED25519Signer(pk) } return nil, errors.New("unsupported public key type") @@ -87,3 +109,17 @@ func LoadSignerFromPEMFile(path string, hashFunc crypto.Hash, pf cryptoutils.Pas } return LoadSigner(priv, hashFunc) } + +// LoadSignerFromPEMFileWithOpts returns a signature.Signer based on the algorithm of the private key +// in the file. The Signer will use the hash function specified in the options when computing digests. 
+func LoadSignerFromPEMFileWithOpts(path string, pf cryptoutils.PassFunc, opts ...LoadOption) (Signer, error) { + fileBytes, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf) + if err != nil { + return nil, err + } + return LoadSignerWithOpts(priv, opts...) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go index 90667f2a..70253b12 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go @@ -25,6 +25,7 @@ import ( "path/filepath" "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature/options" ) // SignerVerifier creates and verifies digital signatures over a message using a specified key pair @@ -39,12 +40,33 @@ type SignerVerifier interface { // If privateKey is an RSA key, a RSAPKCS1v15SignerVerifier will be returned. If a // RSAPSSSignerVerifier is desired instead, use the LoadRSAPSSSignerVerifier() method directly. func LoadSignerVerifier(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (SignerVerifier, error) { + return LoadSignerVerifierWithOpts(privateKey, options.WithHash(hashFunc)) +} + +// LoadSignerVerifierWithOpts returns a signature.SignerVerifier based on the +// algorithm of the private key provided and the user's choice. +func LoadSignerVerifierWithOpts(privateKey crypto.PrivateKey, opts ...LoadOption) (SignerVerifier, error) { + var rsaPSSOptions *rsa.PSSOptions + var useED25519ph bool + hashFunc := crypto.SHA256 + for _, o := range opts { + o.ApplyED25519ph(&useED25519ph) + o.ApplyHash(&hashFunc) + o.ApplyRSAPSS(&rsaPSSOptions) + } + switch pk := privateKey.(type) { case *rsa.PrivateKey: + if rsaPSSOptions != nil { + return LoadRSAPSSSignerVerifier(pk, hashFunc, rsaPSSOptions) + } return LoadRSAPKCS1v15SignerVerifier(pk, hashFunc) case *ecdsa.PrivateKey: return LoadECDSASignerVerifier(pk, hashFunc) case ed25519.PrivateKey: + if useED25519ph { + return LoadED25519phSignerVerifier(pk) + } return LoadED25519SignerVerifier(pk) } return nil, errors.New("unsupported public key type") @@ -67,3 +89,17 @@ func LoadSignerVerifierFromPEMFile(path string, hashFunc crypto.Hash, pf cryptou } return LoadSignerVerifier(priv, hashFunc) } + +// LoadSignerVerifierFromPEMFileWithOpts returns a signature.SignerVerifier based on the algorithm of the private key +// in the file. The SignerVerifier will use the hash function specified in the options when computing digests. +func LoadSignerVerifierFromPEMFileWithOpts(path string, pf cryptoutils.PassFunc, opts ...LoadOption) (SignerVerifier, error) { + fileBytes, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf) + if err != nil { + return nil, err + } + return LoadSignerVerifierWithOpts(priv, opts...) 
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go b/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go
index 9ca60492..cdde9fc5 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go
@@ -26,6 +26,7 @@ import (
 	"path/filepath"
 
 	"github.com/sigstore/sigstore/pkg/cryptoutils"
+	"github.com/sigstore/sigstore/pkg/signature/options"
 )
 
 // Verifier verifies the digital signature using a specified public key
@@ -40,12 +41,33 @@ type Verifier interface {
 // If publicKey is an RSA key, a RSAPKCS1v15Verifier will be returned. If a
 // RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() method directly.
 func LoadVerifier(publicKey crypto.PublicKey, hashFunc crypto.Hash) (Verifier, error) {
+	return LoadVerifierWithOpts(publicKey, options.WithHash(hashFunc))
+}
+
+// LoadVerifierWithOpts returns a signature.Verifier based on the algorithm of the public key
+// provided, using the hash function specified in the options when computing digests.
+func LoadVerifierWithOpts(publicKey crypto.PublicKey, opts ...LoadOption) (Verifier, error) {
+	var rsaPSSOptions *rsa.PSSOptions
+	var useED25519ph bool
+	hashFunc := crypto.SHA256
+	for _, o := range opts {
+		o.ApplyED25519ph(&useED25519ph)
+		o.ApplyHash(&hashFunc)
+		o.ApplyRSAPSS(&rsaPSSOptions)
+	}
+
 	switch pk := publicKey.(type) {
 	case *rsa.PublicKey:
+		if rsaPSSOptions != nil {
+			return LoadRSAPSSVerifier(pk, hashFunc, rsaPSSOptions)
+		}
 		return LoadRSAPKCS1v15Verifier(pk, hashFunc)
 	case *ecdsa.PublicKey:
 		return LoadECDSAVerifier(pk, hashFunc)
 	case ed25519.PublicKey:
+		if useED25519ph {
+			return LoadED25519phVerifier(pk)
+		}
 		return LoadED25519Verifier(pk)
 	}
 	return nil, errors.New("unsupported public key type")
@@ -98,3 +120,19 @@ func LoadVerifierFromPEMFile(path string, hashFunc crypto.Hash) (Verifier, error
 
 	return LoadVerifier(pubKey, hashFunc)
 }
+
+// LoadVerifierFromPEMFileWithOpts returns a signature.Verifier based on the contents of a
+// file located at path. The Verifier will use the hash function specified in the options when computing digests.
+func LoadVerifierFromPEMFileWithOpts(path string, opts ...LoadOption) (Verifier, error) {
+	fileBytes, err := os.ReadFile(filepath.Clean(path))
+	if err != nil {
+		return nil, err
+	}
+
+	pubKey, err := cryptoutils.UnmarshalPEMToPublicKey(fileBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return LoadVerifierWithOpts(pubKey, opts...)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
index b774da88..4d4b4aad 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -28,6 +28,8 @@ var (
 	uint32Type = reflect.TypeOf(uint32(1))
 	uint64Type = reflect.TypeOf(uint64(1))
 
+	uintptrType = reflect.TypeOf(uintptr(1))
+
 	float32Type = reflect.TypeOf(float32(1))
 	float64Type = reflect.TypeOf(float64(1))
 
@@ -308,11 +310,11 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
 	case reflect.Struct:
 		{
 			// All structs enter here. We're not interested in most types.
-			if !canConvert(obj1Value, timeType) {
+			if !obj1Value.CanConvert(timeType) {
 				break
 			}
 
-			// time.Time can compared!
+			// time.Time can be compared!
timeObj1, ok := obj1.(time.Time) if !ok { timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) @@ -328,7 +330,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Slice: { // We only care about the []byte type. - if !canConvert(obj1Value, bytesType) { + if !obj1Value.CanConvert(bytesType) { break } @@ -345,6 +347,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true } + case reflect.Uintptr: + { + uintptrObj1, ok := obj1.(uintptr) + if !ok { + uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr) + } + uintptrObj2, ok := obj2.(uintptr) + if !ok { + uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr) + } + if uintptrObj1 > uintptrObj2 { + return compareGreater, true + } + if uintptrObj1 == uintptrObj2 { + return compareEqual, true + } + if uintptrObj1 < uintptrObj2 { + return compareLess, true + } + } } return compareEqual, false diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go deleted file mode 100644 index da867903..00000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build go1.17 -// +build go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_legacy.go - -package assert - -import "reflect" - -// Wrapper around reflect.Value.CanConvert, for compatibility -// reasons. -func canConvert(value reflect.Value, to reflect.Type) bool { - return value.CanConvert(to) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go deleted file mode 100644 index 1701af2a..00000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !go1.17 -// +build !go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_can_convert.go - -package assert - -import "reflect" - -// Older versions of Go does not have the reflect.Value.CanConvert -// method. -func canConvert(value reflect.Value, to reflect.Type) bool { - return false -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 84dbd6c7..3ddab109 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -107,7 +104,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. 
// // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") @@ -616,6 +613,16 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...) } +// NotImplementsf asserts that an object does not implement the specified interface. +// +// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + // NotNilf asserts that the specified object is not nil. // // assert.NotNilf(t, err, "error message %s", "formatted") @@ -660,10 +667,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -747,10 +756,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index b1d94aec..a84e09bd 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -189,7 +186,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. 
// // a.EqualValues(uint32(123), int32(123)) @@ -200,7 +197,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") @@ -1221,6 +1218,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in return NotErrorIsf(a.t, err, target, msg, args...) } +// NotImplements asserts that an object does not implement the specified interface. +// +// a.NotImplements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplements(a.t, interfaceObject, object, msgAndArgs...) +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplementsf(a.t, interfaceObject, object, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1309,10 +1326,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2]) +// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1320,10 +1339,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1483,10 +1504,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) 
or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2]) +// a.Subset({"x": 1, "y": 2}, {"x": 1}) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1494,10 +1516,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index a55d1bba..0b7570f2 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - yaml "gopkg.in/yaml.v3" + "gopkg.in/yaml.v3" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -110,7 +110,12 @@ func copyExportedFields(expected interface{}) interface{} { return result.Interface() case reflect.Array, reflect.Slice: - result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + var result reflect.Value + if expectedKind == reflect.Array { + result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem() + } else { + result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + } for i := 0; i < expectedValue.Len(); i++ { index := expectedValue.Index(i) if isNil(index) { @@ -140,6 +145,8 @@ func copyExportedFields(expected interface{}) interface{} { // structures. // // This function does no assertion of any kind. +// +// Deprecated: Use [EqualExportedValues] instead. 
func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool { expectedCleaned := copyExportedFields(expected) actualCleaned := copyExportedFields(actual) @@ -153,17 +160,40 @@ func ObjectsAreEqualValues(expected, actual interface{}) bool { return true } - actualType := reflect.TypeOf(actual) - if actualType == nil { + expectedValue := reflect.ValueOf(expected) + actualValue := reflect.ValueOf(actual) + if !expectedValue.IsValid() || !actualValue.IsValid() { return false } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { - // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + + expectedType := expectedValue.Type() + actualType := actualValue.Type() + if !expectedType.ConvertibleTo(actualType) { + return false } - return false + if !isNumericType(expectedType) || !isNumericType(actualType) { + // Attempt comparison after type conversion + return reflect.DeepEqual( + expectedValue.Convert(actualType).Interface(), actual, + ) + } + + // If BOTH values are numeric, there are chances of false positives due + // to overflow or underflow. So, we need to make sure to always convert + // the smaller type to a larger type before comparing. + if expectedType.Size() >= actualType.Size() { + return actualValue.Convert(expectedType).Interface() == expected + } + + return expectedValue.Convert(actualType).Interface() == actual +} + +// isNumericType returns true if the type is one of: +// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, +// float32, float64, complex64, complex128 +func isNumericType(t reflect.Type) bool { + return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128 } /* CallerInfo is necessary because the assert functions use the testing object @@ -266,7 +296,7 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { // Aligns the provided message so that all lines after the first line start at the same location as the first line. // Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the +// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the // basis on which the alignment occurs). func indentMessageLines(message string, longestLabelLen int) string { outBuf := new(bytes.Buffer) @@ -382,6 +412,25 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg return true } +// NotImplements asserts that an object does not implement the specified interface. +// +// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if object == nil { + return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...) + } + if reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...) + } + + return true +} + // IsType asserts that the specified objects are of the same type. 
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -496,7 +545,7 @@ func samePointers(first, second interface{}) bool { // representations appropriate to be presented to the user. // // If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parenthesis similar +// with the type name, and the value will be enclosed in parentheses similar // to a type conversion in the Go grammar. func formatUnequalValues(expected, actual interface{}) (e string, a string) { if reflect.TypeOf(expected) != reflect.TypeOf(actual) { @@ -523,7 +572,7 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValues(t, uint32(123), int32(123)) @@ -566,12 +615,19 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } + if aType.Kind() == reflect.Ptr { + aType = aType.Elem() + } + if bType.Kind() == reflect.Ptr { + bType = bType.Elem() + } + if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) } if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) } expected = copyExportedFields(expected) @@ -620,17 +676,6 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return Fail(t, "Expected value not to be nil.", msgAndArgs...) } -// containsKind checks if a specified kind in the slice of kinds. -func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { - for i := 0; i < len(kinds); i++ { - if kind == kinds[i] { - return true - } - } - - return false -} - // isNil checks if a specified object is nil or not, without Failing. func isNil(object interface{}) bool { if object == nil { @@ -638,16 +683,13 @@ func isNil(object interface{}) bool { } value := reflect.ValueOf(object) - kind := value.Kind() - isNilableKind := containsKind( - []reflect.Kind{ - reflect.Chan, reflect.Func, - reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, - kind) + switch value.Kind() { + case + reflect.Chan, reflect.Func, + reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice, reflect.UnsafePointer: - if isNilableKind && value.IsNil() { - return true + return value.IsNil() } return false @@ -731,16 +773,14 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } -// getLen try to get length of object. -// return (false, 0) if impossible. -func getLen(x interface{}) (ok bool, length int) { +// getLen tries to get the length of an object. +// It returns (0, false) if impossible. 
+func getLen(x interface{}) (length int, ok bool) { v := reflect.ValueOf(x) defer func() { - if e := recover(); e != nil { - ok = false - } + ok = recover() == nil }() - return true, v.Len() + return v.Len(), true } // Len asserts that the specified object has specific length. @@ -751,13 +791,13 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - ok, l := getLen(object) + l, ok := getLen(object) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...) } if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) } return true } @@ -919,10 +959,11 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2]) +// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -975,10 +1016,12 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2]) +// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1439,7 +1482,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd h.Helper() } if math.IsNaN(epsilon) { - return Fail(t, "epsilon must not be NaN") + return Fail(t, "epsilon must not be NaN", msgAndArgs...) } actualEpsilon, err := calcRelativeError(expected, actual) if err != nil { @@ -1458,19 +1501,26 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if h, ok := t.(tHelper); ok { h.Helper() } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { + + if expected == nil || actual == nil { return Fail(t, "Parameters must be slice", msgAndArgs...) } - actualSlice := reflect.ValueOf(actual) expectedSlice := reflect.ValueOf(expected) + actualSlice := reflect.ValueOf(actual) - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result + if expectedSlice.Type().Kind() != reflect.Slice { + return Fail(t, "Expected value must be slice", msgAndArgs...) 
+ } + + expectedLen := expectedSlice.Len() + if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) { + return false + } + + for i := 0; i < expectedLen; i++ { + if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) { + return false } } @@ -1870,23 +1920,18 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { } // FailNow panics. -func (c *CollectT) FailNow() { +func (*CollectT) FailNow() { panic("Assertion failed") } -// Reset clears the collected errors. -func (c *CollectT) Reset() { - c.errors = nil +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Reset() { + panic("Reset() is deprecated") } -// Copy copies the collected errors to the supplied t. -func (c *CollectT) Copy(t TestingT) { - if tt, ok := t.(tHelper); ok { - tt.Helper() - } - for _, err := range c.errors { - t.Errorf("%v", err) - } +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Copy(TestingT) { + panic("Copy() is deprecated") } // EventuallyWithT asserts that given condition will be met in waitFor time, @@ -1912,8 +1957,8 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time h.Helper() } - collect := new(CollectT) - ch := make(chan bool, 1) + var lastFinishedTickErrs []error + ch := make(chan []error, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1924,19 +1969,25 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time for tick := ticker.C; ; { select { case <-timer.C: - collect.Copy(t) + for _, err := range lastFinishedTickErrs { + t.Errorf("%v", err) + } return Fail(t, "Condition never satisfied", msgAndArgs...) case <-tick: tick = nil - collect.Reset() go func() { + collect := new(CollectT) + defer func() { + ch <- collect.errors + }() condition(collect) - ch <- len(collect.errors) == 0 }() - case v := <-ch: - if v { + case errs := <-ch: + if len(errs) == 0 { return true } + // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. + lastFinishedTickErrs = errs tick = ticker.C } } diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index d8038c28..861ed4b7 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -12,7 +12,7 @@ import ( // an error if building a new request fails. func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url, nil) + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return -1, err } @@ -32,12 +32,12 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent if !isSuccessCode { - Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) 
} return isSuccessCode @@ -54,12 +54,12 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect if !isRedirectCode { - Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isRedirectCode @@ -76,12 +76,12 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isErrorCode := code >= http.StatusBadRequest if !isErrorCode { - Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isErrorCode @@ -98,12 +98,12 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } successful := code == statuscode if !successful { - Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...) } return successful @@ -113,7 +113,10 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va // empty string if building a new request fails. func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if len(values) > 0 { + url += "?" + values.Encode() + } + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return "" } @@ -135,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) } return contains @@ -155,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) 
} return !contains diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 63f85214..506a82f8 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package require @@ -235,7 +232,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, t.FailNow() } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValues(t, uint32(123), int32(123)) @@ -249,7 +246,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg t.FailNow() } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") @@ -1546,6 +1543,32 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf t.FailNow() } +// NotImplements asserts that an object does not implement the specified interface. +// +// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotImplements(t, interfaceObject, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotImplementsf(t, interfaceObject, object, msg, args...) { + return + } + t.FailNow() +} + // NotNil asserts that the specified object is not nil. // // assert.NotNil(t, err) @@ -1658,10 +1681,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2]) +// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1672,10 +1697,12 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i t.FailNow() } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. 
// -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1880,10 +1907,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg t.FailNow() } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2]) +// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1894,10 +1922,11 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte t.FailNow() } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 3b5b0933..eee8310a 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package require @@ -190,7 +187,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValues(uint32(123), int32(123)) @@ -201,7 +198,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") @@ -1222,6 +1219,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in NotErrorIsf(a.t, err, target, msg, args...) } +// NotImplements asserts that an object does not implement the specified interface. 
+// +// a.NotImplements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotImplements(a.t, interfaceObject, object, msgAndArgs...) +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotImplementsf(a.t, interfaceObject, object, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1310,10 +1327,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2]) +// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1321,10 +1340,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1484,10 +1505,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2]) +// a.Subset({"x": 1, "y": 2}, {"x": 1}) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1495,10 +1517,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) 
or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/suite/suite.go b/vendor/github.com/stretchr/testify/suite/suite.go index 8b4202d8..18443a91 100644 --- a/vendor/github.com/stretchr/testify/suite/suite.go +++ b/vendor/github.com/stretchr/testify/suite/suite.go @@ -58,7 +58,7 @@ func (suite *Suite) Require() *require.Assertions { suite.mu.Lock() defer suite.mu.Unlock() if suite.require == nil { - suite.require = require.New(suite.T()) + panic("'Require' must not be called before 'Run' or 'SetT'") } return suite.require } @@ -72,17 +72,19 @@ func (suite *Suite) Assert() *assert.Assertions { suite.mu.Lock() defer suite.mu.Unlock() if suite.Assertions == nil { - suite.Assertions = assert.New(suite.T()) + panic("'Assert' must not be called before 'Run' or 'SetT'") } return suite.Assertions } func recoverAndFailOnPanic(t *testing.T) { + t.Helper() r := recover() failOnPanic(t, r) } func failOnPanic(t *testing.T, r interface{}) { + t.Helper() if r != nil { t.Errorf("test panicked: %v\n%s", r, debug.Stack()) t.FailNow() @@ -96,19 +98,20 @@ func failOnPanic(t *testing.T, r interface{}) { func (suite *Suite) Run(name string, subtest func()) bool { oldT := suite.T() - if setupSubTest, ok := suite.s.(SetupSubTest); ok { - setupSubTest.SetupSubTest() - } - - defer func() { - suite.SetT(oldT) - if tearDownSubTest, ok := suite.s.(TearDownSubTest); ok { - tearDownSubTest.TearDownSubTest() - } - }() - return oldT.Run(name, func(t *testing.T) { suite.SetT(t) + defer suite.SetT(oldT) + + defer recoverAndFailOnPanic(t) + + if setupSubTest, ok := suite.s.(SetupSubTest); ok { + setupSubTest.SetupSubTest() + } + + if tearDownSubTest, ok := suite.s.(TearDownSubTest); ok { + defer tearDownSubTest.TearDownSubTest() + } + subtest() }) } @@ -164,6 +167,8 @@ func Run(t *testing.T, suite TestingSuite) { suite.SetT(t) defer recoverAndFailOnPanic(t) defer func() { + t.Helper() + r := recover() if stats != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go index 6ca8d9ad..652aa48b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go @@ -15,16 +15,15 @@ import ( // ArrayCodec is the Codec used for bsoncore.Array values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// ArrayCodec registered. +// Deprecated: ArrayCodec will not be directly accessible in Go Driver 2.0. type ArrayCodec struct{} var defaultArrayCodec = NewArrayCodec() // NewArrayCodec returns an ArrayCodec. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// ArrayCodec registered. +// Deprecated: NewArrayCodec will not be available in Go Driver 2.0. See +// [ArrayCodec] for more details. 
func NewArrayCodec() *ArrayCodec { return &ArrayCodec{} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go index dde3e768..0134b5a9 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go @@ -17,13 +17,28 @@ import ( // ByteSliceCodec is the Codec used for []byte values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// ByteSliceCodec registered. +// Deprecated: ByteSliceCodec will not be directly configurable in Go Driver +// 2.0. To configure the byte slice encode and decode behavior, use the +// configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the byte slice +// encode and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to encode nil byte slices as empty +// BSON binary values, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilByteSliceAsEmpty: true, +// }) +// +// See the deprecation notice for each field in ByteSliceCodec for the +// corresponding settings. type ByteSliceCodec struct { // EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values // instead of BSON null. // - // Deprecated: Use bson.Encoder.NilByteSliceAsEmpty instead. + // Deprecated: Use bson.Encoder.NilByteSliceAsEmpty or options.BSONOptions.NilByteSliceAsEmpty + // instead. EncodeNilAsEmpty bool } @@ -38,8 +53,8 @@ var ( // NewByteSliceCodec returns a ByteSliceCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// ByteSliceCodec registered. +// Deprecated: NewByteSliceCodec will not be available in Go Driver 2.0. See +// [ByteSliceCodec] for more details. func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec { byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...) codec := ByteSliceCodec{} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go index 2ce11973..7e08aab3 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -41,7 +41,7 @@ func newDefaultStructCodec() *StructCodec { if err != nil { // This function is called from the codec registration path, so errors can't be propagated. If there's an error // constructing the StructCodec, we panic to avoid losing it. 
- panic(fmt.Errorf("error creating default StructCodec: %v", err)) + panic(fmt.Errorf("error creating default StructCodec: %w", err)) } return codec } @@ -178,7 +178,7 @@ func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueRe for { key, elemVr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } else if err != nil { return err @@ -1379,7 +1379,7 @@ func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.Value keyType := val.Type().Key() for { key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { @@ -1675,7 +1675,7 @@ func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueR idx := 0 for { vr, err := ar.ReadValue() - if err == bsonrw.ErrEOA { + if errors.Is(err, bsonrw.ErrEOA) { break } if err != nil { @@ -1787,7 +1787,7 @@ func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr b elems := make([]reflect.Value, 0) for { key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go index 4ab14a66..4751ae99 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go @@ -343,7 +343,7 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum } currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -352,7 +352,7 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err @@ -418,7 +418,7 @@ func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.Val for idx := 0; idx < val.Len(); idx++ { currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -427,7 +427,7 @@ func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.Val return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err @@ -487,7 +487,7 @@ func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.Val for idx := 0; idx < val.Len(); idx++ { currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -496,7 +496,7 @@ func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.Val return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go index 94f7dcf1..098368f0 100644 --- 
a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go @@ -17,13 +17,27 @@ import ( // EmptyInterfaceCodec is the Codec used for interface{} values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// EmptyInterfaceCodec registered. +// Deprecated: EmptyInterfaceCodec will not be directly configurable in Go +// Driver 2.0. To configure the empty interface encode and decode behavior, use +// the configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the empty interface +// encode and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to unmarshal BSON binary field +// values as a Go byte slice, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// BinaryAsSlice: true, +// }) +// +// See the deprecation notice for each field in EmptyInterfaceCodec for the +// corresponding settings. type EmptyInterfaceCodec struct { // DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the // "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. // - // Deprecated: Use bson.Decoder.BinaryAsSlice instead. + // Deprecated: Use bson.Decoder.BinaryAsSlice or options.BSONOptions.BinaryAsSlice instead. DecodeBinaryAsSlice bool } @@ -38,8 +52,8 @@ var ( // NewEmptyInterfaceCodec returns a EmptyInterfaceCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// EmptyInterfaceCodec registered. +// Deprecated: NewEmptyInterfaceCodec will not be available in Go Driver 2.0. See +// [EmptyInterfaceCodec] for more details. func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec { interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go index 325c1738..d7e00ffa 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go @@ -8,6 +8,7 @@ package bsoncodec import ( "encoding" + "errors" "fmt" "reflect" "strconv" @@ -21,25 +22,40 @@ var defaultMapCodec = NewMapCodec() // MapCodec is the Codec used for map values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// MapCodec registered. +// Deprecated: MapCodec will not be directly configurable in Go Driver 2.0. To +// configure the map encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the map encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to marshal nil Go maps as empty BSON +// documents, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilMapAsEmpty: true, +// }) +// +// See the deprecation notice for each field in MapCodec for the corresponding +// settings. 
type MapCodec struct { // DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination // value passed to Decode before unmarshaling BSON documents into them. // - // Deprecated: Use bson.Decoder.ZeroMaps instead. + // Deprecated: Use bson.Decoder.ZeroMaps or options.BSONOptions.ZeroMaps instead. DecodeZerosMap bool // EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of // BSON null. // - // Deprecated: Use bson.Encoder.NilMapAsEmpty instead. + // Deprecated: Use bson.Encoder.NilMapAsEmpty or options.BSONOptions.NilMapAsEmpty instead. EncodeNilAsEmpty bool // EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name // strings using fmt.Sprintf() instead of the default string conversion logic. // - // Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt instead. + // Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt or + // options.BSONOptions.StringifyMapKeysWithFmt instead. EncodeKeysWithStringer bool } @@ -61,8 +77,8 @@ type KeyUnmarshaler interface { // NewMapCodec returns a MapCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// MapCodec registered. +// Deprecated: NewMapCodec will not be available in Go Driver 2.0. See +// [MapCodec] for more details. func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec { mapOpt := bsonoptions.MergeMapCodecOptions(opts...) @@ -128,7 +144,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v } currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -137,7 +153,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err @@ -200,7 +216,7 @@ func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val ref for { key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { @@ -313,7 +329,7 @@ func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, if mc.EncodeKeysWithStringer { parsed, err := strconv.ParseFloat(key, 64) if err != nil { - return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %v", keyType.Kind(), err) + return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %w", keyType.Kind(), err) } keyVal = reflect.ValueOf(parsed) break diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go index e5923230..ddfa4a33 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go @@ -18,8 +18,16 @@ var _ ValueDecoder = &PointerCodec{} // PointerCodec is the Codec used for pointers. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// PointerCodec registered. +// Deprecated: PointerCodec will not be directly accessible in Go Driver 2.0. 
To +// override the default pointer encode and decode behavior, create a new registry +// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new +// encoder and decoder for pointers. +// +// For example, +// +// reg := bson.NewRegistry() +// reg.RegisterKindEncoder(reflect.Ptr, myPointerEncoder) +// reg.RegisterKindDecoder(reflect.Ptr, myPointerDecoder) type PointerCodec struct { ecache typeEncoderCache dcache typeDecoderCache @@ -27,8 +35,8 @@ type PointerCodec struct { // NewPointerCodec returns a PointerCodec that has been initialized. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// PointerCodec registered. +// Deprecated: NewPointerCodec will not be available in Go Driver 2.0. See +// [PointerCodec] for more details. func NewPointerCodec() *PointerCodec { return &PointerCodec{} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go index a43daf00..14c9fd25 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go @@ -7,6 +7,7 @@ package bsoncodec import ( + "errors" "fmt" "reflect" @@ -20,8 +21,22 @@ var defaultSliceCodec = NewSliceCodec() // SliceCodec is the Codec used for slice values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// SliceCodec registered. +// Deprecated: SliceCodec will not be directly configurable in Go Driver 2.0. To +// configure the slice encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the slice encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to marshal nil Go slices as empty +// BSON arrays, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilSliceAsEmpty: true, +// }) +// +// See the deprecation notice for each field in SliceCodec for the corresponding +// settings. type SliceCodec struct { // EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of // BSON null. @@ -32,8 +47,8 @@ type SliceCodec struct { // NewSliceCodec returns a SliceCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// SliceCodec registered. +// Deprecated: NewSliceCodec will not be available in Go Driver 2.0. See +// [SliceCodec] for more details. func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec { sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...)
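The PointerCodec and StringCodec notices above both point to the same replacement pattern: build a registry with bson.NewRegistry and register kind-specific encoders and decoders. The following is a minimal, self-contained sketch of that pattern and is not part of this patch; upperStringEncoder is a hypothetical codec standing in for the myStringEncoder/myPointerEncoder placeholders quoted in the notices, and bson.MarshalWithRegistry is used only to exercise it.

package main

import (
	"fmt"
	"reflect"
	"strings"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

// upperStringEncoder is a hypothetical custom encoder: it writes every Go
// string as an upper-cased BSON string.
var upperStringEncoder = bsoncodec.ValueEncoderFunc(func(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Kind() != reflect.String {
		return bsoncodec.ValueEncoderError{
			Name:     "upperStringEncoder",
			Kinds:    []reflect.Kind{reflect.String},
			Received: val,
		}
	}
	return vw.WriteString(strings.ToUpper(val.String()))
})

func main() {
	// Register the encoder for all string values, in place of constructing
	// a StringCodec directly.
	reg := bson.NewRegistry()
	reg.RegisterKindEncoder(reflect.String, upperStringEncoder)

	doc, err := bson.MarshalWithRegistry(reg, bson.M{"greeting": "hello"})
	if err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(doc)) // prints {"greeting": "HELLO"}
}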
@@ -93,7 +108,7 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re for idx := 0; idx < val.Len(); idx++ { currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -102,7 +117,7 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go index ff931b72..a8f885a8 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go @@ -17,8 +17,16 @@ import ( // StringCodec is the Codec used for string values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// StringCodec registered. +// Deprecated: StringCodec will not be directly accessible in Go Driver 2.0. To +// override the default string encode and decode behavior, create a new registry +// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new +// encoder and decoder for strings. +// +// For example, +// +// reg := bson.NewRegistry() +// reg.RegisterKindEncoder(reflect.String, myStringEncoder) +// reg.RegisterKindDecoder(reflect.String, myStringDecoder) type StringCodec struct { // DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. // If false, a string made from the raw object ID bytes will be used. Defaults to true. @@ -38,8 +46,8 @@ var ( // NewStringCodec returns a StringCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// StringCodec registered. +// Deprecated: NewStringCodec will not be available in Go Driver 2.0. See +// [StringCodec] for more details. func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec { stringOpt := bsonoptions.MergeStringCodecOptions(opts...) return &StringCodec{*stringOpt.DecodeObjectIDAsHex} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go index 4cde0a4d..f8d9690c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go @@ -60,8 +60,22 @@ type Zeroer interface { // StructCodec is the Codec used for struct values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// StructCodec registered. +// Deprecated: StructCodec will not be directly configurable in Go Driver 2.0. +// To configure the struct encode and decode behavior, use the configuration +// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the struct encode +// and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. 
+// +// For example, to configure a mongo.Client to omit zero-value structs when +// using the "omitempty" struct tag, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// OmitZeroStruct: true, +// }) +// +// See the deprecation notice for each field in StructCodec for the corresponding +// settings. type StructCodec struct { cache sync.Map // map[reflect.Type]*structDescription parser StructTagParser @@ -69,7 +83,7 @@ type StructCodec struct { // DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the // destination value passed to Decode before unmarshaling BSON documents into them. // - // Deprecated: Use bson.Decoder.ZeroStructs instead. + // Deprecated: Use bson.Decoder.ZeroStructs or options.BSONOptions.ZeroStructs instead. DecodeZeroStruct bool // DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the @@ -82,7 +96,7 @@ type StructCodec struct { // MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag // option is set. // - // Deprecated: Use bson.Encoder.OmitZeroStruct instead. + // Deprecated: Use bson.Encoder.OmitZeroStruct or options.BSONOptions.OmitZeroStruct instead. EncodeOmitDefaultStruct bool // AllowUnexportedFields allows encoding and decoding values from un-exported struct fields. @@ -95,7 +109,8 @@ type StructCodec struct { // a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The // default value is true. // - // Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates instead. + // Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates or + // options.BSONOptions.ErrorOnInlineDuplicates instead. OverwriteDuplicatedInlinedFields bool } @@ -104,8 +119,8 @@ var _ ValueDecoder = &StructCodec{} // NewStructCodec returns a StructCodec that uses p for struct tag parsing. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// StructCodec registered. +// Deprecated: NewStructCodec will not be available in Go Driver 2.0. See +// [StructCodec] for more details. func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) { if p == nil { return nil, errors.New("a StructTagParser must be provided to NewStructCodec") @@ -164,11 +179,11 @@ func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv) - if err != nil && err != errInvalidValue { + if err != nil && !errors.Is(err, errInvalidValue) { return err } - if err == errInvalidValue { + if errors.Is(err, errInvalidValue) { if desc.omitEmpty { continue } @@ -189,17 +204,17 @@ func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val encoder := desc.encoder - var zero bool + var empty bool if cz, ok := encoder.(CodecZeroer); ok { - zero = cz.IsTypeZero(rv.Interface()) + empty = cz.IsTypeZero(rv.Interface()) } else if rv.Kind() == reflect.Interface { - // isZero will not treat an interface rv as an interface, so we need to check for the - // zero interface separately. - zero = rv.IsNil() + // isEmpty will not treat an interface rv as an interface, so we need to check for the + // nil interface separately. 
+ empty = rv.IsNil() } else { - zero = isZero(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) + empty = isEmpty(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) } - if desc.omitEmpty && zero { + if desc.omitEmpty && empty { continue } @@ -239,8 +254,8 @@ func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val } func newDecodeError(key string, original error) error { - de, ok := original.(*DecodeError) - if !ok { + var de *DecodeError + if !errors.As(original, &de) { return &DecodeError{ keys: []string{key}, wrapped: original, @@ -308,7 +323,7 @@ func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val for { name, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { @@ -391,12 +406,15 @@ func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val return nil } -func isZero(v reflect.Value, omitZeroStruct bool) bool { +func isEmpty(v reflect.Value, omitZeroStruct bool) bool { kind := v.Kind() if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) { return v.Interface().(Zeroer).IsZero() } - if kind == reflect.Struct { + switch kind { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Struct: if !omitZeroStruct { return false } @@ -410,7 +428,7 @@ func isZero(v reflect.Value, omitZeroStruct bool) bool { if ff.PkgPath != "" && !ff.Anonymous { continue // Private field } - if !isZero(v.Field(i), omitZeroStruct) { + if !isEmpty(v.Field(i), omitZeroStruct) { return false } } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go index 7b005a99..22fb762c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go @@ -23,12 +23,26 @@ const ( // TimeCodec is the Codec used for time.Time values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// TimeCodec registered. +// Deprecated: TimeCodec will not be directly configurable in Go Driver 2.0. +// To configure the time.Time encode and decode behavior, use the configuration +// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the time.Time encode +// and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to decode time.Time values in the +// local time zone, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// UseLocalTimeZone: true, +// }) +// +// See the deprecation notice for each field in TimeCodec for the corresponding +// settings. type TimeCodec struct { // UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. // - // Deprecated: Use bson.Decoder.UseLocalTimeZone instead. + // Deprecated: Use bson.Decoder.UseLocalTimeZone or options.BSONOptions.UseLocalTimeZone + // instead. UseLocalTimeZone bool } @@ -42,8 +56,8 @@ var ( // NewTimeCodec returns a TimeCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// TimeCodec registered. +// Deprecated: NewTimeCodec will not be available in Go Driver 2.0. See +// [TimeCodec] for more details.
func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec { timeOpt := bsonoptions.MergeTimeCodecOptions(opts...) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go index 7eb10690..85254727 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go @@ -18,13 +18,27 @@ import ( // UIntCodec is the Codec used for uint values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// UIntCodec registered. +// Deprecated: UIntCodec will not be directly configurable in Go Driver 2.0. To +// configure the uint encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the uint encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to marshal Go uint values as the +// minimum BSON int size that can represent the value, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// IntMinSize: true, +// }) +// +// See the deprecation notice for each field in UIntCodec for the corresponding +// settings. type UIntCodec struct { // EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the // minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value. // - // Deprecated: Use bson.Encoder.IntMinSize instead. + // Deprecated: Use bson.Encoder.IntMinSize or options.BSONOptions.IntMinSize instead. EncodeToMinSize bool } @@ -38,8 +52,8 @@ var ( // NewUIntCodec returns a UIntCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// UIntCodec registered. +// Deprecated: NewUIntCodec will not be available in Go Driver 2.0. See +// [UIntCodec] for more details. func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { uintOpt := bsonoptions.MergeUIntCodecOptions(opts...) 
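Taken together, the codec notices in this patch replace per-codec option structs with a single options.BSONOptions applied at the client level. The sketch below combines several of the fields named above; the URI is a placeholder and the option set is illustrative, not a recommendation.

package main

import (
	"context"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	// One BSONOptions value now covers what ByteSliceCodec, MapCodec,
	// SliceCodec, TimeCodec, and UIntCodec options configured individually.
	bsonOpts := &options.BSONOptions{
		NilByteSliceAsEmpty: true, // was ByteSliceCodec.EncodeNilAsEmpty
		NilMapAsEmpty:       true, // was MapCodec.EncodeNilAsEmpty
		NilSliceAsEmpty:     true, // was SliceCodec.EncodeNilAsEmpty
		UseLocalTimeZone:    true, // was TimeCodec.UseLocalTimeZone
		IntMinSize:          true, // was UIntCodec.EncodeToMinSize
	}

	client, err := mongo.Connect(context.Background(),
		options.Client().
			ApplyURI("mongodb://localhost:27017"). // placeholder URI
			SetBSONOptions(bsonOpts))
	if err != nil {
		panic(err)
	}
	defer client.Disconnect(context.Background())
}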
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go index 4d279b7f..1e25570b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go @@ -7,6 +7,7 @@ package bsonrw import ( + "errors" "fmt" "io" @@ -442,7 +443,7 @@ func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { for { vr, err := ar.ReadValue() - if err == ErrEOA { + if errors.Is(err, ErrEOA) { break } if err != nil { @@ -466,7 +467,7 @@ func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error { for { key, vr, err := dr.ReadElement() - if err == ErrEOD { + if errors.Is(err, ErrEOD) { break } if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go index 54c76bf7..bb52a0ec 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go @@ -313,7 +313,7 @@ func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { // convert hex to bytes bytes, err := hex.DecodeString(uuidNoHyphens) if err != nil { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %v", err) + return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %w", err) } ejp.advanceState() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go index 2aca37a9..59ddfc44 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go @@ -7,6 +7,7 @@ package bsonrw import ( + "errors" "fmt" "io" "sync" @@ -613,7 +614,7 @@ func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) { name, t, err := ejvr.p.readKey() if err != nil { - if err == ErrEOD { + if errors.Is(err, ErrEOD) { if ejvr.stack[ejvr.frame].mode == mCodeWithScope { _, err := ejvr.p.peekType() if err != nil { @@ -640,7 +641,7 @@ func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) { t, err := ejvr.p.peekType() if err != nil { - if err == ErrEOA { + if errors.Is(err, ErrEOA) { ejvr.pop() } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go index cd4843a3..43f3e4f3 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go @@ -58,7 +58,7 @@ func (js *jsonScanner) nextToken() (*jsonToken, error) { c, err = js.readNextByte() } - if err == io.EOF { + if errors.Is(err, io.EOF) { return &jsonToken{t: jttEOF}, nil } else if err != nil { return nil, err @@ -198,7 +198,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { for { c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -209,7 +209,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { case '\\': c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -248,7 +248,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { if 
utf16.IsSurrogate(rn) { c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -264,7 +264,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -325,17 +325,17 @@ func (js *jsonScanner) scanLiteral(first byte) (*jsonToken, error) { c5, err := js.readNextByte() - if bytes.Equal([]byte("true"), lit) && (isValueTerminator(c5) || err == io.EOF) { + if bytes.Equal([]byte("true"), lit) && (isValueTerminator(c5) || errors.Is(err, io.EOF)) { js.pos = int(math.Max(0, float64(js.pos-1))) return &jsonToken{t: jttBool, v: true, p: p}, nil - } else if bytes.Equal([]byte("null"), lit) && (isValueTerminator(c5) || err == io.EOF) { + } else if bytes.Equal([]byte("null"), lit) && (isValueTerminator(c5) || errors.Is(err, io.EOF)) { js.pos = int(math.Max(0, float64(js.pos-1))) return &jsonToken{t: jttNull, v: nil, p: p}, nil } else if bytes.Equal([]byte("fals"), lit) { if c5 == 'e' { c5, err = js.readNextByte() - if isValueTerminator(c5) || err == io.EOF { + if isValueTerminator(c5) || errors.Is(err, io.EOF) { js.pos = int(math.Max(0, float64(js.pos-1))) return &jsonToken{t: jttBool, v: false, p: p}, nil } @@ -384,7 +384,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { for { c, err = js.readNextByte() - if err != nil && err != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return nil, err } @@ -413,7 +413,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else { s = nssInvalid @@ -430,7 +430,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else if isDigit(c) { s = nssSawIntegerDigits @@ -455,7 +455,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else if isDigit(c) { s = nssSawFractionDigits @@ -490,7 +490,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else if isDigit(c) { s = nssSawExponentDigits diff --git a/vendor/go.mongodb.org/mongo-driver/bson/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/doc.go index 048b5eb9..af609847 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/doc.go @@ -6,9 +6,9 @@ // Package bson is a library for reading, writing, and manipulating BSON. BSON is a binary serialization format used to // store documents and make remote procedure calls in MongoDB. The BSON specification is located at https://bsonspec.org. -// The BSON library handles marshalling and unmarshalling of values through a configurable codec system. For a description -// of the codec system and examples of registering custom codecs, see the bsoncodec package. For additional information and -// usage examples, check out the [Work with BSON] page in the Go Driver docs site. 
+// The BSON library handles marshaling and unmarshaling of values through a configurable codec system. For a description +// of the codec system and examples of registering custom codecs, see the bsoncodec package. For additional information +// and usage examples, check out the [Work with BSON] page in the Go Driver docs site. // // # Raw BSON // @@ -38,7 +38,7 @@ // bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} // bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} // -// When decoding BSON to a D or M, the following type mappings apply when unmarshalling: +// When decoding BSON to a D or M, the following type mappings apply when unmarshaling: // // 1. BSON int32 unmarshals to an int32. // 2. BSON int64 unmarshals to an int64. @@ -62,83 +62,78 @@ // 20. BSON DBPointer unmarshals to a primitive.DBPointer. // 21. BSON symbol unmarshals to a primitive.Symbol. // -// The above mappings also apply when marshalling a D or M to BSON. Some other useful marshalling mappings are: +// The above mappings also apply when marshaling a D or M to BSON. Some other useful marshaling mappings are: // // 1. time.Time marshals to a BSON datetime. // 2. int8, int16, and int32 marshal to a BSON int32. // 3. int marshals to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, inclusive, and a BSON int64 // otherwise. -// 4. int64 marshals to BSON int64. +// 4. int64 marshals to BSON int64 (unless [Encoder.IntMinSize] is set). // 5. uint8 and uint16 marshal to a BSON int32. -// 6. uint, uint32, and uint64 marshal to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, -// inclusive, and BSON int64 otherwise. -// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshalling a BSON null or +// 6. uint, uint32, and uint64 marshal to a BSON int64 (unless [Encoder.IntMinSize] is set). +// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshaling a BSON null or // undefined value into a string will yield the empty string.). // // # Structs // -// Structs can be marshalled/unmarshalled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended +// Structs can be marshaled/unmarshaled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended // JSON, the following rules apply: // -// 1. Only exported fields in structs will be marshalled or unmarshalled. +// 1. Only exported fields in structs will be marshaled or unmarshaled. // -// 2. When marshalling a struct, each field will be lowercased to generate the key for the corresponding BSON element. +// 2. When marshaling a struct, each field will be lowercased to generate the key for the corresponding BSON element. // For example, a struct field named "Foo" will generate key "foo". This can be overridden via a struct tag (e.g. // `bson:"fooField"` to generate key "fooField" instead). // -// 3. An embedded struct field is marshalled as a subdocument. The key will be the lowercased name of the field's type. +// 3. An embedded struct field is marshaled as a subdocument. The key will be the lowercased name of the field's type. // -// 4. A pointer field is marshalled as the underlying type if the pointer is non-nil. If the pointer is nil, it is -// marshalled as a BSON null value. +// 4. A pointer field is marshaled as the underlying type if the pointer is non-nil. If the pointer is nil, it is +// marshaled as a BSON null value. // -// 5. 
When unmarshalling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents -// unmarshalled into an interface{} field will be unmarshalled as a D. +// 5. When unmarshaling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents +// unmarshaled into an interface{} field will be unmarshaled as a D. // // The encoding of each struct field can be customized by the "bson" struct tag. // // This tag behavior is configurable, and different struct tag behavior can be configured by initializing a new -// bsoncodec.StructCodec with the desired tag parser and registering that StructCodec onto the Registry. By default, JSON tags -// are not honored, but that can be enabled by creating a StructCodec with JSONFallbackStructTagParser, like below: +// bsoncodec.StructCodec with the desired tag parser and registering that StructCodec onto the Registry. By default, JSON +// tags are not honored, but that can be enabled by creating a StructCodec with JSONFallbackStructTagParser, like below: // // Example: // // structcodec, _ := bsoncodec.NewStructCodec(bsoncodec.JSONFallbackStructTagParser) // // The bson tag gives the name of the field, possibly followed by a comma-separated list of options. -// The name may be empty in order to specify options without overriding the default field name. The following options can be used -// to configure behavior: +// The name may be empty in order to specify options without overriding the default field name. The following options can +// be used to configure behavior: // -// 1. omitempty: If the omitempty struct tag is specified on a field, the field will not be marshalled if it is set to -// the zero value. Fields with language primitive types such as integers, booleans, and strings are considered empty if -// their value is equal to the zero value for the type (i.e. 0 for integers, false for booleans, and "" for strings). -// Slices, maps, and arrays are considered empty if they are of length zero. Interfaces and pointers are considered -// empty if their value is nil. By default, structs are only considered empty if the struct type implements the -// bsoncodec.Zeroer interface and the IsZero method returns true. Struct fields whose types do not implement Zeroer are -// never considered empty and will be marshalled as embedded documents. +// 1. omitempty: If the omitempty struct tag is specified on a field, the field will be omitted from the marshaling if +// the field has an empty value, defined as false, 0, a nil pointer, a nil interface value, and any empty array, +// slice, map, or string. // NOTE: It is recommended that this tag be used for all slice and map fields. // // 2. minsize: If the minsize struct tag is specified on a field of type int64, uint, uint32, or uint64 and the value of -// the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For other -// types, this tag is ignored. +// the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For +// other types, this tag is ignored. // -// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles unmarshalled -// into that field will be truncated at the decimal point. For example, if 3.14 is unmarshalled into a field of type int, -// it will be unmarshalled as 3. If this tag is not specified, the decoder will throw an error if the value cannot be -// decoded without losing precision. 
For float64 or non-numeric types, this tag is ignored. +// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles +// unmarshaled into that field will be truncated at the decimal point. For example, if 3.14 is unmarshaled into a +// field of type int, it will be unmarshaled as 3. If this tag is not specified, the decoder will throw an error if +// the value cannot be decoded without losing precision. For float64 or non-numeric types, this tag is ignored. // // 4. inline: If the inline struct tag is specified for a struct or map field, the field will be "flattened" when -// marshalling and "un-flattened" when unmarshalling. This means that all of the fields in that struct/map will be -// pulled up one level and will become top-level fields rather than being fields in a nested document. For example, if a -// map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will be -// {"foo": "bar"} instead of {"map": {"foo": "bar"}}. There can only be one inlined map field in a struct. If there are -// duplicated fields in the resulting document when an inlined struct is marshalled, the inlined field will be overwritten. -// If there are duplicated fields in the resulting document when an inlined map is marshalled, an error will be returned. -// This tag can be used with fields that are pointers to structs. If an inlined pointer field is nil, it will not be -// marshalled. For fields that are not maps or structs, this tag is ignored. +// marshaling and "un-flattened" when unmarshaling. This means that all of the fields in that struct/map will be +// pulled up one level and will become top-level fields rather than being fields in a nested document. For example, +// if a map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will +// be {"foo": "bar"} instead of {"map": {"foo": "bar"}}. There can only be one inlined map field in a struct. If +// there are duplicated fields in the resulting document when an inlined struct is marshaled, the inlined field will +// be overwritten. If there are duplicated fields in the resulting document when an inlined map is marshaled, an +// error will be returned. This tag can be used with fields that are pointers to structs. If an inlined pointer field +// is nil, it will not be marshaled. For fields that are not maps or structs, this tag is ignored. // -// # Marshalling and Unmarshalling +// # Marshaling and Unmarshaling // -// Manually marshalling and unmarshalling can be done with the Marshal and Unmarshal family of functions. +// Manually marshaling and unmarshaling can be done with the Marshal and Unmarshal family of functions. // // [Work with BSON]: https://www.mongodb.com/docs/drivers/go/current/fundamentals/bson/ package bson diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go index 24ab58fc..08c39514 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go @@ -164,9 +164,6 @@ func (d Decimal128) BigInt() (*big.Int, int, error) { // Would be handled by the logic below, but that's trivial and common. 
if high == 0 && low == 0 && exp == 0 { - if posSign { - return new(big.Int), 0, nil - } return new(big.Int), 0, nil } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go index 9bbaffac..c130e3ff 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go @@ -183,7 +183,7 @@ func processUniqueBytes() [5]byte { var b [5]byte _, err := io.ReadFull(rand.Reader, b[:]) if err != nil { - panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err)) + panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %w", err)) } return b @@ -193,7 +193,7 @@ func readRandomUint32() uint32 { var b [4]byte _, err := io.ReadFull(rand.Reader, b[:]) if err != nil { - panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err)) + panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %w", err)) } return (uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s index d2ca5dee..b3c1699b 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s @@ -19,15 +19,14 @@ #define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ MULLD r0, h0, t0; \ - MULLD r0, h1, t4; \ MULHDU r0, h0, t1; \ + MULLD r0, h1, t4; \ MULHDU r0, h1, t5; \ ADDC t4, t1, t1; \ MULLD r0, h2, t2; \ - ADDZE t5; \ MULHDU r1, h0, t4; \ MULLD r1, h0, h0; \ - ADD t5, t2, t2; \ + ADDE t5, t2, t2; \ ADDC h0, t1, t1; \ MULLD h2, r1, t3; \ ADDZE t4, h0; \ @@ -37,13 +36,11 @@ ADDE t5, t3, t3; \ ADDC h0, t2, t2; \ MOVD $-4, t4; \ - MOVD t0, h0; \ - MOVD t1, h1; \ ADDZE t3; \ - ANDCC $3, t2, h2; \ - AND t2, t4, t0; \ + RLDICL $0, t2, $62, h2; \ + AND t2, t4, h0; \ ADDC t0, h0, h0; \ - ADDE t3, h1, h1; \ + ADDE t3, t1, h1; \ SLD $62, t3, t4; \ SRD $2, t2; \ ADDZE h2; \ @@ -75,6 +72,7 @@ TEXT ·update(SB), $0-32 loop: POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + PCALIGN $16 multiply: POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) ADD $-16, R5 diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go index 4269ed11..bf225953 100644 --- a/vendor/golang.org/x/crypto/ocsp/ocsp.go +++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go @@ -279,21 +279,22 @@ func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier { // This is the exposed reflection of the internal OCSP structures. -// The status values that can be expressed in OCSP. See RFC 6960. +// The status values that can be expressed in OCSP. See RFC 6960. +// These are used for the Response.Status field. const ( // Good means that the certificate is valid. - Good = iota + Good = 0 // Revoked means that the certificate has been deliberately revoked. - Revoked + Revoked = 1 // Unknown means that the OCSP responder doesn't know about the certificate. - Unknown + Unknown = 2 // ServerFailed is unused and was never used (see // https://go-review.googlesource.com/#/c/18944). ParseResponse will // return a ResponseError when an error response is parsed. - ServerFailed + ServerFailed = 3 ) -// The enumerated reasons for revoking a certificate. See RFC 5280. +// The enumerated reasons for revoking a certificate. See RFC 5280. 
const ( Unspecified = 0 KeyCompromise = 1 diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index c1f6b90d..e2b298d8 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error { } func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) + v := int(fr.maxHeaderListSize()) + if v < 0 { + // If maxHeaderListSize overflows an int, use no limit (0). + return 0 } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 + return v } // readMetaFrame returns 0 or more CONTINUATION frames from fr and diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index b2a0b7c6..a8d7b06a 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -15,22 +15,10 @@ Load passes most patterns directly to the underlying build tool. The default build tool is the go command. Its supported patterns are described at https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns. +Other build systems may be supported by providing a "driver"; +see [The driver protocol]. -Load may be used in Go projects that use alternative build systems, by -installing an appropriate "driver" program for the build system and -specifying its location in the GOPACKAGESDRIVER environment variable. -For example, -https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration -explains how to use the driver for Bazel. -The driver program is responsible for interpreting patterns in its -preferred notation and reporting information about the packages that -they identify. -(See driverRequest and driverResponse types for the JSON -schema used by the protocol. -Though the protocol is supported, these types are currently unexported; -see #64608 for a proposal to publish them.) - -Regardless of driver, all patterns with the prefix "query=", where query is a +All patterns with the prefix "query=", where query is a non-empty string of letters from [a-z], are reserved and may be interpreted as query operators. @@ -86,7 +74,29 @@ for details. Most tools should pass their command-line arguments (after any flags) uninterpreted to [Load], so that it can interpret them according to the conventions of the underlying build system. + See the Example function for typical usage. + +# The driver protocol + +[Load] may be used to load Go packages even in Go projects that use +alternative build systems, by installing an appropriate "driver" +program for the build system and specifying its location in the +GOPACKAGESDRIVER environment variable. +For example, +https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration +explains how to use the driver for Bazel. + +The driver program is responsible for interpreting patterns in its +preferred notation and reporting information about the packages that +those patterns identify. Drivers must also support the special "file=" +and "pattern=" patterns described above. + +The patterns are provided as positional command-line arguments. A +JSON-encoded [DriverRequest] message providing additional information +is written to the driver's standard input. The driver must write a +JSON-encoded [DriverResponse] message to its standard output. (This +message differs from the JSON schema produced by 'go list'.) 
*/ package packages // import "golang.org/x/tools/go/packages" diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 7db1d129..4335c1eb 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -2,12 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file enables an external tool to intercept package requests. -// If the tool is present then its results are used in preference to -// the go list command. - package packages +// This file defines the protocol that enables an external "driver" +// tool to supply package metadata in place of 'go list'. + import ( "bytes" "encoding/json" @@ -17,31 +16,71 @@ import ( "strings" ) -// The Driver Protocol +// DriverRequest defines the schema of a request for package metadata +// from an external driver program. The JSON-encoded DriverRequest +// message is provided to the driver program's standard input. The +// query patterns are provided as command-line arguments. // -// The driver, given the inputs to a call to Load, returns metadata about the packages specified. -// This allows for different build systems to support go/packages by telling go/packages how the -// packages' source is organized. -// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in -// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package -// documentation in doc.go for the full description of the patterns that need to be supported. -// A driver receives as a JSON-serialized driverRequest struct in standard input and will -// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output. - -// driverRequest is used to provide the portion of Load's Config that is needed by a driver. -type driverRequest struct { +// See the package documentation for an overview. +type DriverRequest struct { Mode LoadMode `json:"mode"` + // Env specifies the environment the underlying build system should be run in. Env []string `json:"env"` + // BuildFlags are flags that should be passed to the underlying build system. BuildFlags []string `json:"build_flags"` + // Tests specifies whether the patterns should also return test packages. Tests bool `json:"tests"` + // Overlay maps file paths (relative to the driver's working directory) to the byte contents // of overlay files. Overlay map[string][]byte `json:"overlay"` } +// DriverResponse defines the schema of a response from an external +// driver program, providing the results of a query for package +// metadata. The driver program must write a JSON-encoded +// DriverResponse message to its standard output. +// +// See the package documentation for an overview. +type DriverResponse struct { + // NotHandled is returned if the request can't be handled by the current + // driver. If an external driver returns a response with NotHandled, the + // rest of the DriverResponse is ignored, and go/packages will fall back + // to the next driver. If go/packages is extended in the future to support + // lists of multiple drivers, go/packages will fall back to the next driver. + NotHandled bool + + // Compiler and Arch are the arguments passed to types.SizesFor + // to get a types.Sizes to use when type checking. + Compiler string + Arch string + + // Roots is the set of package IDs that make up the root packages.
+ // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports if populated will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package + + // GoVersion is the minor version number used by the driver + // (e.g. the go command on the PATH) when selecting .go files. + // Zero means unknown. + GoVersion int +} + +// driver is the type for functions that query the build system for the +// packages named by the patterns. +type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) + // findExternalDriver returns the file path of a tool that supplies // the build system package structure, or "" if not found." // If GOPACKAGESDRIVER is set in the environment findExternalTool returns its @@ -64,8 +103,8 @@ func findExternalDriver(cfg *Config) driver { return nil } } - return func(cfg *Config, words ...string) (*driverResponse, error) { - req, err := json.Marshal(driverRequest{ + return func(cfg *Config, words ...string) (*DriverResponse, error) { + req, err := json.Marshal(DriverRequest{ Mode: cfg.Mode, Env: cfg.Env, BuildFlags: cfg.BuildFlags, @@ -92,7 +131,7 @@ func findExternalDriver(cfg *Config) driver { fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr) } - var response driverResponse + var response DriverResponse if err := json.Unmarshal(buf.Bytes(), &response); err != nil { return nil, err } diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index cd375fbc..22305d9c 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -35,23 +35,23 @@ type goTooOldError struct { error } -// responseDeduper wraps a driverResponse, deduplicating its contents. +// responseDeduper wraps a DriverResponse, deduplicating its contents. type responseDeduper struct { seenRoots map[string]bool seenPackages map[string]*Package - dr *driverResponse + dr *DriverResponse } func newDeduper() *responseDeduper { return &responseDeduper{ - dr: &driverResponse{}, + dr: &DriverResponse{}, seenRoots: map[string]bool{}, seenPackages: map[string]*Package{}, } } -// addAll fills in r with a driverResponse. -func (r *responseDeduper) addAll(dr *driverResponse) { +// addAll fills in r with a DriverResponse. +func (r *responseDeduper) addAll(dr *DriverResponse) { for _, pkg := range dr.Packages { r.addPackage(pkg) } @@ -128,7 +128,7 @@ func (state *golistState) mustGetEnv() map[string]string { // goListDriver uses the go list command to interpret the patterns and produce // the build system package structure. // See driver for more details. -func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { +func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) { // Make sure that any asynchronous go commands are killed when we return. parentCtx := cfg.Context if parentCtx == nil { @@ -146,16 +146,18 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { } // Fill in response.Sizes asynchronously if necessary. 
- var sizeserr error - var sizeswg sync.WaitGroup if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { - sizeswg.Add(1) + errCh := make(chan error) go func() { compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) - sizeserr = err response.dr.Compiler = compiler response.dr.Arch = arch - sizeswg.Done() + errCh <- err + }() + defer func() { + if sizesErr := <-errCh; sizesErr != nil { + err = sizesErr + } }() } @@ -208,10 +210,7 @@ extractQueries: } } - sizeswg.Wait() - if sizeserr != nil { - return nil, sizeserr - } + // (We may yet return an error due to defer.) return response.dr, nil } @@ -266,7 +265,7 @@ func (state *golistState) runContainsQueries(response *responseDeduper, queries // adhocPackage attempts to load or construct an ad-hoc package for a given // query, if the original call to the driver produced inadequate results. -func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { +func (state *golistState) adhocPackage(pattern, query string) (*DriverResponse, error) { response, err := state.createDriverResponse(query) if err != nil { return nil, err @@ -357,7 +356,7 @@ func otherFiles(p *jsonPackage) [][]string { // createDriverResponse uses the "go list" command to expand the pattern // words and return a response for the specified packages. -func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { +func (state *golistState) createDriverResponse(words ...string) (*DriverResponse, error) { // go list uses the following identifiers in ImportPath and Imports: // // "p" -- importable package or main (command) @@ -384,7 +383,7 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse pkgs := make(map[string]*Package) additionalErrors := make(map[string][]Error) // Decode the JSON and convert it to Package form. - response := &driverResponse{ + response := &DriverResponse{ GoVersion: goVersion, } for dec := json.NewDecoder(buf); dec.More(); { diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 81e9e6a7..f33b0afc 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -206,43 +206,6 @@ type Config struct { Overlay map[string][]byte } -// driver is the type for functions that query the build system for the -// packages named by the patterns. -type driver func(cfg *Config, patterns ...string) (*driverResponse, error) - -// driverResponse contains the results for a driver query. -type driverResponse struct { - // NotHandled is returned if the request can't be handled by the current - // driver. If an external driver returns a response with NotHandled, the - // rest of the driverResponse is ignored, and go/packages will fallback - // to the next driver. If go/packages is extended in the future to support - // lists of multiple drivers, go/packages will fall back to the next driver. - NotHandled bool - - // Compiler and Arch are the arguments pass of types.SizesFor - // to get a types.Sizes to use when type checking. - Compiler string - Arch string - - // Roots is the set of package IDs that make up the root packages. - // We have to encode this separately because when we encode a single package - // we cannot know if it is one of the roots as that requires knowledge of the - // graph it is part of. - Roots []string `json:",omitempty"` - - // Packages is the full set of packages in the graph. 
- // The packages are not connected into a graph. - // The Imports if populated will be stubs that only have their ID set. - // Imports will be connected and then type and syntax information added in a - // later pass (see refine). - Packages []*Package - - // GoVersion is the minor version number used by the driver - // (e.g. the go command on the PATH) when selecting .go files. - // Zero means unknown. - GoVersion int -} - // Load loads and returns the Go packages named by the given patterns. // // Config specifies loading options; @@ -291,7 +254,7 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) { // no external driver, or the driver returns a response with NotHandled set, // defaultDriver will fall back to the go list driver. // The boolean result indicates that an external driver handled the request. -func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, error) { +func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, error) { if driver := findExternalDriver(cfg); driver != nil { response, err := driver(cfg, patterns...) if err != nil { @@ -303,7 +266,10 @@ func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, erro } response, err := goListDriver(cfg, patterns...) - return response, false, err + if err != nil { + return nil, false, err + } + return response, false, nil } // A Package describes a loaded Go package. @@ -648,7 +614,7 @@ func newLoader(cfg *Config) *loader { // refine connects the supplied packages into a graph and then adds type // and syntax information as requested by the LoadMode. -func (ld *loader) refine(response *driverResponse) ([]*Package, error) { +func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { roots := response.Roots rootMap := make(map[string]int, len(roots)) for i, root := range roots { diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 9bde15e3..9fffa9ad 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -224,6 +224,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte // Gather the relevant packages from the manifest. items := make([]GetPackagesItem, r.uint64()) + uniquePkgPaths := make(map[string]bool) for i := range items { pkgPathOff := r.uint64() pkgPath := p.stringAt(pkgPathOff) @@ -248,6 +249,12 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte } items[i].nameIndex = nameIndex + + uniquePkgPaths[pkgPath] = true + } + // Debugging #63822; hypothesis: there are duplicate PkgPaths. 
+ if len(uniquePkgPaths) != len(items) { + reportf("found duplicate PkgPaths while reading export data manifest: %v", items) } // Request packages all at once from the client, diff --git a/vendor/modules.txt b/vendor/modules.txt index 8982c764..7f2b8a76 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -17,7 +17,7 @@ github.com/Microsoft/go-winio/internal/socket github.com/Microsoft/go-winio/internal/stringbuffer github.com/Microsoft/go-winio/pkg/guid github.com/Microsoft/go-winio/vhd -# github.com/Microsoft/hcsshim v0.12.0-rc.2 +# github.com/Microsoft/hcsshim v0.12.0-rc.3 ## explicit; go 1.18 github.com/Microsoft/hcsshim github.com/Microsoft/hcsshim/computestorage @@ -56,9 +56,9 @@ github.com/asaskevich/govalidator # github.com/containerd/cgroups/v3 v3.0.2 ## explicit; go 1.18 github.com/containerd/cgroups/v3/cgroup1/stats -# github.com/containerd/containerd v1.7.12 -## explicit; go 1.19 -github.com/containerd/containerd/errdefs +# github.com/containerd/errdefs v0.1.0 +## explicit; go 1.20 +github.com/containerd/errdefs # github.com/containerd/stargz-snapshotter/estargz v0.15.1 ## explicit; go 1.19 github.com/containerd/stargz-snapshotter/estargz @@ -73,7 +73,7 @@ github.com/containers/common/pkg/password github.com/containers/common/pkg/report github.com/containers/common/pkg/report/camelcase github.com/containers/common/pkg/retry -# github.com/containers/image/v5 v5.29.3-0.20240207231441-93b4b55d865b +# github.com/containers/image/v5 v5.29.3-0.20240301163503-faa4f4fd0e4f ## explicit; go 1.19 github.com/containers/image/v5/copy github.com/containers/image/v5/directory @@ -164,7 +164,7 @@ github.com/containers/ocicrypt/keywrap/pkcs7 github.com/containers/ocicrypt/spec github.com/containers/ocicrypt/utils github.com/containers/ocicrypt/utils/keyprovider -# github.com/containers/storage v1.52.0 +# github.com/containers/storage v1.52.1-0.20240301111729-226cffb1c4d2 ## explicit; go 1.20 github.com/containers/storage github.com/containers/storage/drivers @@ -192,6 +192,7 @@ github.com/containers/storage/pkg/directory github.com/containers/storage/pkg/dmesg github.com/containers/storage/pkg/fileutils github.com/containers/storage/pkg/fsutils +github.com/containers/storage/pkg/fsverity github.com/containers/storage/pkg/homedir github.com/containers/storage/pkg/idmap github.com/containers/storage/pkg/idtools @@ -275,7 +276,7 @@ github.com/docker/go-units # github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 github.com/felixge/httpsnoop -# github.com/go-jose/go-jose/v3 v3.0.1 +# github.com/go-jose/go-jose/v3 v3.0.2 ## explicit; go 1.12 github.com/go-jose/go-jose/v3 github.com/go-jose/go-jose/v3/cipher @@ -323,7 +324,7 @@ github.com/go-openapi/runtime/yamlpc # github.com/go-openapi/spec v0.20.9 ## explicit; go 1.13 github.com/go-openapi/spec -# github.com/go-openapi/strfmt v0.22.0 +# github.com/go-openapi/strfmt v0.22.1 ## explicit; go 1.19 github.com/go-openapi/strfmt # github.com/go-openapi/swag v0.22.9 @@ -346,7 +347,7 @@ github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp -# github.com/google/go-containerregistry v0.17.0 +# github.com/google/go-containerregistry v0.19.0 ## explicit; go 1.18 github.com/google/go-containerregistry/pkg/name github.com/google/go-containerregistry/pkg/v1 @@ -354,7 +355,7 @@ github.com/google/go-containerregistry/pkg/v1/types # github.com/google/go-intervals v0.0.2 ## explicit; go 1.12 github.com/google/go-intervals/intervalset -# 
github.com/google/uuid v1.5.0 +# github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid # github.com/gorilla/mux v1.8.1 @@ -381,8 +382,8 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/klauspost/compress v1.17.6 -## explicit; go 1.19 +# github.com/klauspost/compress v1.17.7 +## explicit; go 1.20 github.com/klauspost/compress github.com/klauspost/compress/flate github.com/klauspost/compress/fse @@ -394,6 +395,8 @@ github.com/klauspost/compress/zstd/internal/xxhash # github.com/klauspost/pgzip v1.2.6 ## explicit github.com/klauspost/pgzip +# github.com/kr/pretty v0.3.1 +## explicit; go 1.12 # github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e ## explicit; go 1.20 github.com/letsencrypt/boulder/core @@ -428,6 +431,9 @@ github.com/mitchellh/mapstructure # github.com/moby/sys/mountinfo v0.7.1 ## explicit; go 1.16 github.com/moby/sys/mountinfo +# github.com/moby/sys/user v0.1.0 +## explicit; go 1.17 +github.com/moby/sys/user # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent @@ -448,10 +454,7 @@ github.com/opencontainers/image-spec/specs-go/v1 # github.com/opencontainers/image-tools v1.0.0-rc3 ## explicit github.com/opencontainers/image-tools/image -# github.com/opencontainers/runc v1.1.12 -## explicit; go 1.17 -github.com/opencontainers/runc/libcontainer/user -# github.com/opencontainers/runtime-spec v1.1.0 +# github.com/opencontainers/runtime-spec v1.2.0 ## explicit github.com/opencontainers/runtime-spec/specs-go # github.com/opencontainers/selinux v1.11.0 @@ -489,6 +492,8 @@ github.com/secure-systems-lab/go-securesystemslib/encrypted # github.com/segmentio/ksuid v1.0.4 ## explicit; go 1.12 github.com/segmentio/ksuid +# github.com/shurcooL/sanitized_anchor_name v1.0.0 +## explicit # github.com/sigstore/fulcio v1.4.3 ## explicit; go 1.20 github.com/sigstore/fulcio/pkg/api @@ -503,7 +508,7 @@ github.com/sigstore/rekor/pkg/generated/client/pubkey github.com/sigstore/rekor/pkg/generated/client/tlog github.com/sigstore/rekor/pkg/generated/models github.com/sigstore/rekor/pkg/util -# github.com/sigstore/sigstore v1.8.1 +# github.com/sigstore/sigstore v1.8.2 ## explicit; go 1.20 github.com/sigstore/sigstore/pkg/cryptoutils github.com/sigstore/sigstore/pkg/oauth @@ -526,8 +531,8 @@ github.com/spf13/pflag # github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 ## explicit github.com/stefanberger/go-pkcs11uri -# github.com/stretchr/testify v1.8.4 -## explicit; go 1.20 +# github.com/stretchr/testify v1.9.0 +## explicit; go 1.17 github.com/stretchr/testify/assert github.com/stretchr/testify/require github.com/stretchr/testify/suite @@ -569,8 +574,8 @@ github.com/xeipuuv/gojsonreference # github.com/xeipuuv/gojsonschema v1.2.0 ## explicit github.com/xeipuuv/gojsonschema -# go.mongodb.org/mongo-driver v1.13.1 -## explicit; go 1.13 +# go.mongodb.org/mongo-driver v1.14.0 +## explicit; go 1.18 go.mongodb.org/mongo-driver/bson go.mongodb.org/mongo-driver/bson/bsoncodec go.mongodb.org/mongo-driver/bson/bsonoptions @@ -606,6 +611,8 @@ go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/internal go.opentelemetry.io/otel/semconv/v1.12.0 go.opentelemetry.io/otel/semconv/v1.17.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 +## explicit; go 1.20 # go.opentelemetry.io/otel/metric v1.19.0 ## explicit; go 1.20 go.opentelemetry.io/otel/metric @@ -613,7 +620,9 @@ go.opentelemetry.io/otel/metric/embedded 
# go.opentelemetry.io/otel/trace v1.19.0 ## explicit; go 1.20 go.opentelemetry.io/otel/trace -# golang.org/x/crypto v0.19.0 +# go.opentelemetry.io/proto/otlp v1.0.0 +## explicit; go 1.17 +# golang.org/x/crypto v0.20.0 ## explicit; go 1.18 golang.org/x/crypto/cast5 golang.org/x/crypto/ed25519 @@ -631,16 +640,16 @@ golang.org/x/crypto/pbkdf2 golang.org/x/crypto/salsa20/salsa golang.org/x/crypto/scrypt golang.org/x/crypto/sha3 -# golang.org/x/exp v0.0.0-20240119083558-1b970713d09a +# golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/maps golang.org/x/exp/slices -# golang.org/x/mod v0.14.0 +# golang.org/x/mod v0.15.0 ## explicit; go 1.18 golang.org/x/mod/semver golang.org/x/mod/sumdb/note -# golang.org/x/net v0.20.0 +# golang.org/x/net v0.21.0 ## explicit; go 1.18 golang.org/x/net/http/httpguts golang.org/x/net/http2 @@ -648,7 +657,7 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/oauth2 v0.16.0 +# golang.org/x/oauth2 v0.17.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal @@ -672,7 +681,7 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/tools v0.17.0 +# golang.org/x/tools v0.18.0 ## explicit; go 1.18 golang.org/x/tools/cmd/stringer golang.org/x/tools/go/gcexportdata @@ -701,7 +710,7 @@ google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status # google.golang.org/grpc v1.59.0
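[Editor's note] The centerpiece of the golang.org/x/tools bump above is that the go/packages driver protocol is now public: driverRequest/driverResponse became DriverRequest/DriverResponse in external.go. The sketch below shows what a minimal external driver built against the exported types might look like. Only the protocol itself comes from the patch (patterns in argv, DriverRequest on stdin, DriverResponse on stdout, NotHandled to fall back to 'go list'); the program body is illustrative, assuming x/tools at or after this version.

// A minimal external go/packages driver sketch. go/packages locates it
// via the GOPACKAGESDRIVER environment variable, or as a binary named
// gopackagesdriver on the PATH.
package main

import (
	"encoding/json"
	"fmt"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	// The JSON-encoded DriverRequest arrives on standard input...
	var req packages.DriverRequest
	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
		fmt.Fprintln(os.Stderr, "invalid DriverRequest:", err)
		os.Exit(1)
	}
	// ...and the query patterns arrive as command-line arguments.
	patterns := os.Args[1:]
	fmt.Fprintf(os.Stderr, "driver: mode=%v patterns=%v\n", req.Mode, patterns)

	// A real driver would answer from its build system here, honoring
	// req.Env, req.BuildFlags, req.Tests, and req.Overlay. Returning
	// NotHandled tells go/packages to fall back to the next driver
	// (ultimately goListDriver).
	resp := packages.DriverResponse{NotHandled: true}
	if err := json.NewEncoder(os.Stdout).Encode(&resp); err != nil {
		os.Exit(1)
	}
}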
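[Editor's note] The goListDriver hunk in golist.go also deserves a remark: the sizes query's sync.WaitGroup plus shared error variable was replaced by an unbuffered channel drained in a defer, so the goroutine's error is promoted into the new named err return even on early-exit paths. A self-contained sketch of that pattern, with hypothetical names (fetchSizes, load):

package main

import (
	"errors"
	"fmt"
)

// fetchSizes stands in for the asynchronous sizes query
// (GetSizesForArgsGolist in the real code).
func fetchSizes() error { return errors.New("sizes query failed") }

// load mirrors the refactor: the deferred receive both waits for the
// goroutine (as sizeswg.Wait did) and, on failure, overwrites the
// named return value err.
func load() (_ string, err error) {
	errCh := make(chan error)
	go func() { errCh <- fetchSizes() }()
	defer func() {
		if sizesErr := <-errCh; sizesErr != nil {
			err = sizesErr
		}
	}()
	// ... main work runs concurrently with fetchSizes ...
	return "response", nil // (we may yet return an error due to the defer)
}

func main() {
	res, err := load()
	fmt.Println(res, err) // prints: response sizes query failed
}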