Compare commits

...

9 Commits

Author SHA1 Message Date
Miloslav Trmač
c175e3e9d8 Merge pull request #2536 from TomSweeneyRedHat/dev/tsweeney/CVE-2025-21744-release-1.16
[release-1.16] Fix CVE-2025-27144
2025-03-04 18:16:45 +01:00
tomsweeneyredhat
670947188f [release-1.16] Fix CVE-2025-27144
Addresses CVE-2025-27144 by bumping github.com/go-jose/go-jose/v3 to
v3.0.4 and github.com/go-jose/go-jose/v4 to v4.0.5

Fixes: https://issues.redhat.com/browse/OCPBUGS-51251,
https://issues.redhat.com/browse/OCPBUGS-51252

[NO NEW TESTS NEEDED]

Signed-off-by: tomsweeneyredhat <tsweeney@redhat.com>
2025-03-03 21:12:23 -05:00
Miloslav Trmač
8cafdb019a Merge pull request #2433 from lsm5/release-1.16-packit-downstream-constraint-backport
[backport release-1.16] Packit: constrain downstream koji job to fedora package
2024-10-08 21:29:00 +02:00
Lokesh Mandvekar
f544fcd196 Packit: constrain downstream koji job to fedora package
This constraint helps avoid duplicate Packit jobs. The effect will only be
seen at Fedora koji build time; upstream is unaffected.

Signed-off-by: Lokesh Mandvekar <lsm5@fedoraproject.org>
(cherry picked from commit 6ad77a1bc4)
Signed-off-by: Lokesh Mandvekar <lsm5@fedoraproject.org>
2024-10-08 16:22:15 +05:30
Lokesh Mandvekar
eebbc35ba9 Packit: split out ELN jobs and reuse fedora downstream targets
ELN sits roughly midway between Fedora and RHEL, so it's best to list ELN
jobs separately. This will also allow reusing fedora targets via YAML
anchors for TMT tests.

This commit also lists fedora-40 targets explicitly for copr_build jobs so
that fedora-40 jobs continue to trigger once fedora-41 is released.

Signed-off-by: Lokesh Mandvekar <lsm5@fedoraproject.org>
(cherry picked from commit e39efb10e1)
Signed-off-by: Lokesh Mandvekar <lsm5@fedoraproject.org>
2024-09-16 17:14:34 +05:30
Lokesh Mandvekar
b18c57b11a Packit: Enable sidetags for bodhi updates
Packit now has sidetag support for adding multiple builds into a single
bodhi update.

Since we often release c/common, skopeo, buildah and podman almost
simultaneously, we should release them to Fedora in a single
bodhi update using sidetags so all builds can be tested together.

Signed-off-by: Lokesh Mandvekar <lsm5@fedoraproject.org>
(cherry picked from commit 7da6ea07ca)
Signed-off-by: Lokesh Mandvekar <lsm5@fedoraproject.org>
2024-09-16 17:14:34 +05:30
Miloslav Trmač
270b5358c4 Merge pull request #2406 from TomSweeneyRedHat/dev/tsweeney/v1.16.1
[release-1.16] Bump Skopeo to v1.16.1, c/common to v0.60.2, c/image to v5.32.2
2024-08-22 00:15:34 +02:00
tomsweeneyredhat
fe07cc958a [release-1.16] Bump to Skopeo v1.16.1
As the title says. Bumping to v1.16.1; we will
target this for RHEL 9.5/10.0 Beta

Signed-off-by: tomsweeneyredhat <tsweeney@redhat.com>
2024-08-21 15:00:18 -04:00
tomsweeneyredhat
a47ee582a3 [release-1.16] Bump c/common to v0.60.2, c/image to v5.32.2
Bumping to the latest versions of c/common and c/image to pick up
the latest zstd:chunked and multiple signature keys updates.

Signed-off-by: tomsweeneyredhat <tsweeney@redhat.com>
2024-08-21 14:43:05 -04:00
218 changed files with 12284 additions and 3392 deletions


@@ -18,6 +18,8 @@ packages:
specfile_path: rpm/skopeo.spec
skopeo-rhel:
specfile_path: rpm/skopeo.spec
skopeo-eln:
specfile_path: rpm/skopeo.spec
srpm_build_deps:
- make
@@ -30,8 +32,21 @@ jobs:
failure_comment:
message: "Ephemeral COPR build failed. @containers/packit-build please check."
targets:
fedora-all-x86_64: {}
fedora-all-aarch64: {}
- fedora-development-x86_64
- fedora-development-aarch64
- fedora-latest-x86_64
- fedora-latest-aarch64
- fedora-latest-stable-x86_64
- fedora-latest-stable-aarch64
- fedora-40-x86_64
- fedora-40-aarch64
enable_net: true
- job: copr_build
trigger: pull_request
packages: [skopeo-eln]
notifications: *copr_build_failure_notification
targets:
fedora-eln-x86_64:
additional_repos:
- "https://kojipkgs.fedoraproject.org/repos/eln-build/latest/x86_64/"
@@ -77,7 +92,7 @@ jobs:
trigger: release
packages: [skopeo-fedora]
update_release: false
dist_git_branches:
dist_git_branches: &fedora_targets
- fedora-all
# Sync to CentOS Stream
@@ -88,12 +103,14 @@ jobs:
dist_git_branches:
- c10s
# Fedora Koji build
- job: koji_build
trigger: commit
dist_git_branches:
- fedora-all
- job: bodhi_update
trigger: commit
dist_git_branches:
- fedora-branched # rawhide updates are created automatically
packages: [skopeo-fedora]
sidetag_group: podman-releases
# Dependents are not rpm dependencies, but the package whose bodhi update
# should include this package.
# Ref: https://packit.dev/docs/fedora-releases-guide/releasing-multiple-packages
dependents:
- podman
dist_git_branches: *fedora_targets

go.mod

@@ -7,8 +7,8 @@ go 1.21.0
require (
github.com/Masterminds/semver/v3 v3.2.1
github.com/containers/common v0.60.0
github.com/containers/image/v5 v5.32.0
github.com/containers/common v0.60.2
github.com/containers/image/v5 v5.32.2
github.com/containers/ocicrypt v1.2.0
github.com/containers/storage v1.55.0
github.com/docker/distribution v2.8.3+incompatible
@@ -18,10 +18,10 @@ require (
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.9.0
github.com/stretchr/testify v1.10.0
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8
golang.org/x/term v0.22.0
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
golang.org/x/term v0.28.0
gopkg.in/yaml.v3 v3.0.1
)
@@ -48,9 +48,9 @@ require (
github.com/docker/go-units v0.5.0 // indirect
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-jose/go-jose/v3 v3.0.3 // indirect
github.com/go-jose/go-jose/v4 v4.0.2 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-jose/go-jose/v3 v3.0.4 // indirect
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.23.0 // indirect
github.com/go-openapi/errors v0.22.0 // indirect
@@ -80,7 +80,7 @@ require (
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mattn/go-sqlite3 v1.14.22 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
@@ -113,7 +113,7 @@ require (
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/vbatts/tar-split v0.11.5 // indirect
github.com/vbauerster/mpb/v8 v8.7.4 // indirect
github.com/vbauerster/mpb/v8 v8.7.5 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
@@ -124,14 +124,14 @@ require (
go.opentelemetry.io/otel v1.24.0 // indirect
go.opentelemetry.io/otel/metric v1.24.0 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
golang.org/x/crypto v0.25.0 // indirect
golang.org/x/mod v0.18.0 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/oauth2 v0.21.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/sys v0.22.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/crypto v0.32.0 // indirect
golang.org/x/mod v0.19.0 // indirect
golang.org/x/net v0.28.0 // indirect
golang.org/x/oauth2 v0.22.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.29.0 // indirect
golang.org/x/text v0.21.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
google.golang.org/grpc v1.64.1 // indirect
google.golang.org/protobuf v1.33.0 // indirect
google.golang.org/protobuf v1.34.1 // indirect
)

go.sum

@@ -37,10 +37,10 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
github.com/containers/common v0.60.0 h1:QMNygqiiit9LU/yqee9Dv0N0oQ+rQq41ElfdOBQtw7w=
github.com/containers/common v0.60.0/go.mod h1:dtKVe11xkV89tqzRX9s/B0ORjeB2dy5UB46aGjunMn8=
github.com/containers/image/v5 v5.32.0 h1:yjbweazPfr8xOzQ2hkkYm1A2V0jN96/kES6Gwyxj7hQ=
github.com/containers/image/v5 v5.32.0/go.mod h1:x5e0RDfGaY6bnQ13gJ2LqbfHvzssfB/y5a8HduGFxJc=
github.com/containers/common v0.60.2 h1:utcwp2YkO8c0mNlwRxsxfOiqfj157FRrBjxgjR6f+7o=
github.com/containers/common v0.60.2/go.mod h1:I0upBi1qJX3QmzGbUOBN1LVP6RvkKhd3qQpZbQT+Q54=
github.com/containers/image/v5 v5.32.2 h1:SzNE2Y6sf9b1GJoC8qjCuMBXwQrACFp4p0RK15+4gmQ=
github.com/containers/image/v5 v5.32.2/go.mod h1:v1l73VeMugfj/QtKI+jhYbwnwFCFnNGckvbST3rQ5Hk=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sirVuPM=
@@ -85,13 +85,13 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk=
github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY=
github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
@@ -153,8 +153,8 @@ github.com/google/go-containerregistry v0.20.0/go.mod h1:YCMFNQeeXeLF+dnhhWkqDIt
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k=
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -202,8 +202,8 @@ github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxec
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
@@ -229,10 +229,10 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw=
github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI=
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
@@ -305,8 +305,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/sylabs/sif/v2 v2.18.0 h1:eXugsS1qx7St2Wu/AJ21KnsQiVCpouPlTigABh+6KYI=
github.com/sylabs/sif/v2 v2.18.0/go.mod h1:GOQj7LIBqp15fjqH5i8ZEbLp8SXJi9S+xbRO+QQAdRo=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
@@ -320,8 +320,8 @@ github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
github.com/vbauerster/mpb/v8 v8.7.4 h1:p4f16iMfUt3PkAC73SCzAtgtSf8TYDqEbJUT3odPrPo=
github.com/vbauerster/mpb/v8 v8.7.4/go.mod h1:r1B5k2Ljj5KJFCekfihbiqyV4VaaRTANYmvWA2btufI=
github.com/vbauerster/mpb/v8 v8.7.5 h1:hUF3zaNsuaBBwzEFoCvfuX3cpesQXZC0Phm/JcHZQ+c=
github.com/vbauerster/mpb/v8 v8.7.5/go.mod h1:bRCnR7K+mj5WXKsy0NWB6Or+wctYGvVwKn6huwvxKa0=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -371,11 +371,11 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY=
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -383,8 +383,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -398,11 +398,11 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -410,8 +410,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -424,23 +424,23 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -453,8 +453,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -485,8 +485,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=


@@ -11,6 +11,7 @@ import (
"github.com/containers/image/v5/pkg/compression"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
chunkedToc "github.com/containers/storage/pkg/chunked/toc"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
@@ -34,10 +35,10 @@ var (
// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step.
type bpDetectCompressionStepData struct {
isCompressed bool
format compressiontypes.Algorithm // Valid if isCompressed
decompressor compressiontypes.DecompressorFunc // Valid if isCompressed
srcCompressorName string // Compressor name to possibly record in the blob info cache for the source blob.
isCompressed bool
format compressiontypes.Algorithm // Valid if isCompressed
decompressor compressiontypes.DecompressorFunc // Valid if isCompressed
srcCompressorBaseVariantName string // Compressor name to possibly record in the blob info cache for the source blob.
}
// blobPipelineDetectCompressionStep updates *stream to detect its current compression format.
@@ -51,15 +52,25 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
}
stream.reader = reader
if decompressor != nil && format.Name() == compressiontypes.ZstdAlgorithmName {
tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
if err != nil {
return bpDetectCompressionStepData{}, err
}
if tocDigest != nil {
format = compression.ZstdChunked
}
}
res := bpDetectCompressionStepData{
isCompressed: decompressor != nil,
format: format,
decompressor: decompressor,
}
if res.isCompressed {
res.srcCompressorName = format.Name()
res.srcCompressorBaseVariantName = format.BaseVariantName()
} else {
res.srcCompressorName = internalblobinfocache.Uncompressed
res.srcCompressorBaseVariantName = internalblobinfocache.Uncompressed
}
if expectedBaseFormat, known := expectedBaseCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.BaseVariantName() != expectedBaseFormat.Name() {
@@ -70,13 +81,14 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
// bpCompressionStepData contains data that the copy pipeline needs about the compression step.
type bpCompressionStepData struct {
operation bpcOperation // What we are actually doing
uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
uploadedAnnotations map[string]string // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
srcCompressorName string // Compressor name to record in the blob info cache for the source blob.
uploadedCompressorName string // Compressor name to record in the blob info cache for the uploaded blob.
closers []io.Closer // Objects to close after the upload is done, if any.
operation bpcOperation // What we are actually doing
uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
uploadedAnnotations map[string]string // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
srcCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the source blob.
uploadedCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the uploaded blob.
uploadedCompressorSpecificVariantName string // Compressor specific variant name to record in the blob info cache for the uploaded blob.
closers []io.Closer // Objects to close after the upload is done, if any.
}
type bpcOperation int
@@ -128,11 +140,12 @@ func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectComp
// We can't do anything with an encrypted blob unless decrypted.
logrus.Debugf("Using original blob without modification for encrypted blob")
return &bpCompressionStepData{
operation: bpcOpPreserveOpaque,
uploadedOperation: types.PreserveOriginal,
uploadedAlgorithm: nil,
srcCompressorName: internalblobinfocache.UnknownCompression,
uploadedCompressorName: internalblobinfocache.UnknownCompression,
operation: bpcOpPreserveOpaque,
uploadedOperation: types.PreserveOriginal,
uploadedAlgorithm: nil,
srcCompressorBaseVariantName: internalblobinfocache.UnknownCompression,
uploadedCompressorBaseVariantName: internalblobinfocache.UnknownCompression,
uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
}, nil
}
return nil, nil
@@ -156,14 +169,19 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp
Digest: "",
Size: -1,
}
specificVariantName := uploadedAlgorithm.Name()
if specificVariantName == uploadedAlgorithm.BaseVariantName() {
specificVariantName = internalblobinfocache.UnknownCompression
}
return &bpCompressionStepData{
operation: bpcOpCompressUncompressed,
uploadedOperation: types.Compress,
uploadedAlgorithm: uploadedAlgorithm,
uploadedAnnotations: annotations,
srcCompressorName: detected.srcCompressorName,
uploadedCompressorName: uploadedAlgorithm.Name(),
closers: []io.Closer{reader},
operation: bpcOpCompressUncompressed,
uploadedOperation: types.Compress,
uploadedAlgorithm: uploadedAlgorithm,
uploadedAnnotations: annotations,
srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
uploadedCompressorBaseVariantName: uploadedAlgorithm.BaseVariantName(),
uploadedCompressorSpecificVariantName: specificVariantName,
closers: []io.Closer{reader},
}, nil
}
return nil, nil
@@ -196,15 +214,20 @@ func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bp
Digest: "",
Size: -1,
}
specificVariantName := ic.compressionFormat.Name()
if specificVariantName == ic.compressionFormat.BaseVariantName() {
specificVariantName = internalblobinfocache.UnknownCompression
}
succeeded = true
return &bpCompressionStepData{
operation: bpcOpRecompressCompressed,
uploadedOperation: types.PreserveOriginal,
uploadedAlgorithm: ic.compressionFormat,
uploadedAnnotations: annotations,
srcCompressorName: detected.srcCompressorName,
uploadedCompressorName: ic.compressionFormat.Name(),
closers: []io.Closer{decompressed, recompressed},
operation: bpcOpRecompressCompressed,
uploadedOperation: types.PreserveOriginal,
uploadedAlgorithm: ic.compressionFormat,
uploadedAnnotations: annotations,
srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
uploadedCompressorBaseVariantName: ic.compressionFormat.BaseVariantName(),
uploadedCompressorSpecificVariantName: specificVariantName,
closers: []io.Closer{decompressed, recompressed},
}, nil
}
return nil, nil
@@ -225,12 +248,13 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp
Size: -1,
}
return &bpCompressionStepData{
operation: bpcOpDecompressCompressed,
uploadedOperation: types.Decompress,
uploadedAlgorithm: nil,
srcCompressorName: detected.srcCompressorName,
uploadedCompressorName: internalblobinfocache.Uncompressed,
closers: []io.Closer{s},
operation: bpcOpDecompressCompressed,
uploadedOperation: types.Decompress,
uploadedAlgorithm: nil,
srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
uploadedCompressorBaseVariantName: internalblobinfocache.Uncompressed,
uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
closers: []io.Closer{s},
}, nil
}
return nil, nil
@@ -268,11 +292,15 @@ func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCom
algorithm = nil
}
return &bpCompressionStepData{
operation: bpcOp,
uploadedOperation: uploadedOp,
uploadedAlgorithm: algorithm,
srcCompressorName: detected.srcCompressorName,
uploadedCompressorName: detected.srcCompressorName,
operation: bpcOp,
uploadedOperation: uploadedOp,
uploadedAlgorithm: algorithm,
srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
// We only record the base variant of the format on upload; we didn't do anything with
// the TOC, we don't know whether it matches the blob digest, so we don't want to trigger
// reuse of any kind between the blob digest and the TOC digest.
uploadedCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
}
}
@@ -308,6 +336,15 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
// No useful information
case bpcOpCompressUncompressed:
c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
if d.uploadedAnnotations != nil {
tocDigest, err := chunkedToc.GetTOCDigest(d.uploadedAnnotations)
if err != nil {
return fmt.Errorf("parsing just-created compression annotations: %w", err)
}
if tocDigest != nil {
c.blobInfoCache.RecordTOCUncompressedPair(*tocDigest, srcInfo.Digest)
}
}
case bpcOpDecompressCompressed:
c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
case bpcOpRecompressCompressed, bpcOpPreserveCompressed:
@@ -323,29 +360,27 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
}
}
if d.srcCompressorName == "" || d.uploadedCompressorName == "" {
return fmt.Errorf("internal error: missing compressor names (src: %q, uploaded: %q)",
d.srcCompressorName, d.uploadedCompressorName)
if d.srcCompressorBaseVariantName == "" || d.uploadedCompressorBaseVariantName == "" || d.uploadedCompressorSpecificVariantName == "" {
return fmt.Errorf("internal error: missing compressor names (src base: %q, uploaded base: %q, uploaded specific: %q)",
d.srcCompressorBaseVariantName, d.uploadedCompressorBaseVariantName, d.uploadedCompressorSpecificVariantName)
}
if d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
// HACK: Don't record zstd:chunked algorithms.
// There is already a similar hack in internal/imagedestination/impl/helpers.CandidateMatchesTryReusingBlobOptions,
// and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless.
//
// We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate
// between zstd and zstd:chunked; so we could, in varying situations over time, call RecordDigestCompressorName
// with the same digest and both ZstdAlgorithmName and ZstdChunkedAlgorithmName , which causes warnings about
// inconsistent data to be logged.
c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
}
if d.uploadedCompressorBaseVariantName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorData(uploadedInfo.Digest, internalblobinfocache.DigestCompressorData{
BaseVariantCompressor: d.uploadedCompressorBaseVariantName,
SpecificVariantCompressor: d.uploadedCompressorSpecificVariantName,
SpecificVariantAnnotations: d.uploadedAnnotations,
})
}
if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest &&
d.srcCompressorName != internalblobinfocache.UnknownCompression {
if d.srcCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
// HACK: Don't record zstd:chunked algorithms, see above.
c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
}
d.srcCompressorBaseVariantName != internalblobinfocache.UnknownCompression {
// If the source is already using some TOC-dependent variant, we either copied the
// blob as is, or perhaps decompressed it; either way we don't trust the TOC digest,
// so record neither the variant name, nor the TOC digest.
c.blobInfoCache.RecordDigestCompressorData(srcInfo.Digest, internalblobinfocache.DigestCompressorData{
BaseVariantCompressor: d.srcCompressorBaseVariantName,
SpecificVariantCompressor: internalblobinfocache.UnknownCompression,
SpecificVariantAnnotations: nil,
})
}
return nil
}
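
The change above works around content sniffing alone being unable to distinguish zstd from zstd:chunked (the byte stream is valid zstd either way): the detected format is refined using the layer's TOC annotation, and only locally verified compressor data is recorded. A minimal standalone sketch of the refinement step, assuming the packages the diff already imports (chunkedToc is github.com/containers/storage/pkg/chunked/toc):

    import (
        "github.com/containers/image/v5/pkg/compression"
        compressiontypes "github.com/containers/image/v5/pkg/compression/types"
        chunkedToc "github.com/containers/storage/pkg/chunked/toc"
    )

    // refineZstdVariant upgrades a detected plain-zstd format to zstd:chunked when
    // the layer annotations carry a TOC digest; plain zstd layers have no such
    // annotation, so sniffing the stream cannot make this distinction.
    func refineZstdVariant(format compressiontypes.Algorithm, annotations map[string]string) (compressiontypes.Algorithm, error) {
        if format.Name() != compressiontypes.ZstdAlgorithmName {
            return format, nil // only zstd has a chunked variant to tell apart
        }
        tocDigest, err := chunkedToc.GetTOCDigest(annotations)
        if err != nil {
            return compressiontypes.Algorithm{}, err // malformed TOC annotations
        }
        if tocDigest != nil {
            return compression.ZstdChunked, nil
        }
        return format, nil
    }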


@@ -48,7 +48,7 @@ func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo
Annotations: stream.info.Annotations,
}
// DecryptLayer supposedly returns a digest of the decrypted stream.
// In pratice, that value is never set in the current implementation.
// In practice, that value is never set in the current implementation.
// And we shouldn't use it anyway, because it is not trusted: encryption can be made to a public key,
// i.e. it doesn't authenticate the origin of the metadata in any way.
reader, _, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)


@@ -121,7 +121,7 @@ func (c *copier) printCopyInfo(kind string, info types.BlobInfo) {
}
}
// mark100PercentComplete marks the progres bars as 100% complete;
// mark100PercentComplete marks the progress bars as 100% complete;
// it may do so by possibly advancing the current state if it is below the known total.
func (bar *progressBar) mark100PercentComplete() {
if bar.originalSize > 0 {


@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"io"
"maps"
"reflect"
"slices"
"strings"
@@ -149,6 +150,28 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
ic.compressionFormat = c.options.DestinationCtx.CompressionFormat
ic.compressionLevel = c.options.DestinationCtx.CompressionLevel
}
// HACK: Don't combine zstd:chunked and encryption.
// zstd:chunked can only usefully be consumed using range requests of parts of the layer, which would require the encryption
// to support decrypting arbitrary subsets of the stream. That's plausible but not supported using the encryption API we have.
// Also, the chunked metadata is exposed in annotations unencrypted, which reveals the TOC digest = layer identity without
// encryption. (That can be determined from the unencrypted config anyway, but, still...)
//
// Ideally this should query a well-defined property of the compression algorithm (and $somehow determine the right fallback) instead of
// hard-coding zstd:chunked / zstd.
if ic.c.options.OciEncryptLayers != nil {
format := ic.compressionFormat
if format == nil {
format = defaultCompressionFormat
}
if format.Name() == compressiontypes.ZstdChunkedAlgorithmName {
if ic.requireCompressionFormatMatch {
return copySingleImageResult{}, errors.New("explicitly requested to combine zstd:chunked with encryption, which is not beneficial; use plain zstd instead")
}
logrus.Warnf("Compression using zstd:chunked is not beneficial for encrypted layers, using plain zstd instead")
ic.compressionFormat = &compression.Zstd
}
}
// Decide whether we can substitute blobs with semantic equivalents:
// - Don't do that if we can't modify the manifest at all
// - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
@@ -192,7 +215,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
noPendingManifestUpdates := ic.noPendingManifestUpdates()
logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t, compression match required for resuing blobs=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates, opts.requireCompressionFormatMatch)
logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t, compression match required for reusing blobs=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates, opts.requireCompressionFormatMatch)
if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates && !ic.requireCompressionFormatMatch {
matchedResult, err := ic.compareImageDestinationManifestEqual(ctx, targetInstance)
if err != nil {
@@ -866,21 +889,33 @@ func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.Reuse
// Handling of compression, encryption, and the related MIME types and the like are all the responsibility
// of the generic code in this package.
res := types.BlobInfo{
Digest: reusedBlob.Digest,
Size: reusedBlob.Size,
URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
Annotations: inputInfo.Annotations, // FIXME: This should remove zstd:chunked annotations (but those annotations being left with incorrect values should not break pulls)
MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
Digest: reusedBlob.Digest,
Size: reusedBlob.Size,
URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
// FIXME: This should remove zstd:chunked annotations IF the original was chunked and the new one isn't
// (but those annotations being left with incorrect values should not break pulls).
Annotations: maps.Clone(inputInfo.Annotations),
MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
CompressionOperation: reusedBlob.CompressionOperation,
CompressionAlgorithm: reusedBlob.CompressionAlgorithm,
CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset anyway.
}
// The transport is only expected to fill CompressionOperation and CompressionAlgorithm
// if the blob was substituted; otherwise, fill it in based
// if the blob was substituted; otherwise, it is optional, and if not set, fill it in based
// on what we know from the srcInfos we were given.
if reusedBlob.Digest == inputInfo.Digest {
res.CompressionOperation = inputInfo.CompressionOperation
res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
if res.CompressionOperation == types.PreserveOriginal {
res.CompressionOperation = inputInfo.CompressionOperation
}
if res.CompressionAlgorithm == nil {
res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
}
}
if len(reusedBlob.CompressionAnnotations) != 0 {
if res.Annotations == nil {
res.Annotations = map[string]string{}
}
maps.Copy(res.Annotations, reusedBlob.CompressionAnnotations)
}
return res
}
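
The annotation handling above deliberately clones the caller-supplied map before merging in the candidate's compression annotations, so the caller's BlobInfo is never mutated and reused entries win on key collisions. A small sketch of the same clone-then-copy pattern (function name hypothetical):

    import "maps"

    // mergeAnnotations never mutates input; entries from reused override
    // duplicate keys from input, mirroring the maps.Clone/maps.Copy sequence above.
    func mergeAnnotations(input, reused map[string]string) map[string]string {
        res := maps.Clone(input) // maps.Clone(nil) returns nil, preserving "no annotations"
        if len(reused) != 0 {
            if res == nil {
                res = map[string]string{}
            }
            maps.Copy(res, reused)
        }
        return res
    }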


@@ -332,6 +332,7 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
}
originalCandidateKnownToBeMissing := false
if impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
// First, check whether the blob happens to already exist at the destination.
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
@@ -341,9 +342,17 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
if haveBlob {
return true, reusedInfo, nil
}
originalCandidateKnownToBeMissing = true
} else {
logrus.Debugf("Ignoring exact blob match, compression %s does not match required %s or MIME types %#v",
optionalCompressionName(options.OriginalCompression), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
// We can get here with a blob detected to be zstd when the user wants a zstd:chunked.
// In that case we keep originalCandidateKnownToBeMissing = false, so that if we find
// a BIC entry for this blob, we do use that entry and return a zstd:chunked entry
// with the BIC's annotations.
// This is not quite correct, it only works if the BIC also contains an acceptable _location_.
// Ideally, we could look up just the compression algorithm/annotations for info.digest,
// and use it even if no location candidate exists and the original candidate is present.
}
// Then try reusing blobs from other locations.
@@ -387,7 +396,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
// for it in the current repo.
candidateRepo = reference.TrimNamed(d.ref.ref)
}
if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
if originalCandidateKnownToBeMissing &&
candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
logrus.Debug("... Already tried the primary destination")
continue
}
@@ -427,10 +437,12 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
return true, private.ReusedBlob{
Digest: candidate.Digest,
Size: size,
CompressionOperation: candidate.CompressionOperation,
CompressionAlgorithm: candidate.CompressionAlgorithm}, nil
Digest: candidate.Digest,
Size: size,
CompressionOperation: candidate.CompressionOperation,
CompressionAlgorithm: candidate.CompressionAlgorithm,
CompressionAnnotations: candidate.CompressionAnnotations,
}, nil
}
return false, private.ReusedBlob{}, nil
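
The originalCandidateKnownToBeMissing flag introduced above narrows when the primary destination may be skipped during the candidate scan: only after the exact blob has actually been probed and found absent there. A condensed sketch of that control flow (helper and parameter names hypothetical):

    // tryReuse probes the exact blob only when an exact match is acceptable, and
    // skips the primary repo among the other candidates only if that probe has
    // already proven the blob absent there.
    func tryReuse(exactMatchAcceptable bool, probeExact func() bool, candidates []string, primary string) (string, bool) {
        originalCandidateKnownToBeMissing := false
        if exactMatchAcceptable {
            if probeExact() {
                return primary, true // blob already present at the destination
            }
            originalCandidateKnownToBeMissing = true
        }
        for _, c := range candidates {
            // If the primary repo was never probed (e.g. a plain-zstd blob when
            // zstd:chunked is required), a BIC entry for it may still be usable,
            // so it must not be skipped here.
            if originalCandidateKnownToBeMissing && c == primary {
                continue
            }
            return c, true
        }
        return "", false
    }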


@@ -27,7 +27,14 @@ func (bic *v1OnlyBlobInfoCache) Open() {
func (bic *v1OnlyBlobInfoCache) Close() {
}
func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
func (bic *v1OnlyBlobInfoCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
return ""
}
func (bic *v1OnlyBlobInfoCache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
}
func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData) {
}
func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2 {


@@ -26,19 +26,40 @@ type BlobInfoCache2 interface {
// Close destroys state created by Open().
Close()
// RecordDigestCompressorName records a compressor for the blob with the specified digest,
// or Uncompressed or UnknownCompression.
// WARNING: Only call this with LOCALLY VERIFIED data; don't record a compressor for a
// digest just because some remote author claims so (e.g. because a manifest says so);
// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest.
// Returns "" if the uncompressed digest is unknown.
UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest
// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don't record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest)
// RecordDigestCompressorData records data for the blob with the specified digest.
// WARNING: Only call this with LOCALLY VERIFIED data:
// - don't record a compressor for a digest just because some remote author claims so
// (e.g. because a manifest says so);
// - don't record the non-base variant or annotations if we are not _sure_ that the base variant
// and the blob's digest match the non-base variant's annotations (e.g. because we saw them
// in a manifest)
// otherwise the cache could be poisoned and cause us to make incorrect edits to type
// information in a manifest.
RecordDigestCompressorName(anyDigest digest.Digest, compressorName string)
RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData)
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2
}
// DigestCompressorData is information known about how a blob is compressed.
// (This is worded generically, but basically targeted at the zstd / zstd:chunked situation.)
type DigestCompressorData struct {
BaseVariantCompressor string // A compressor's base variant name, or Uncompressed or UnknownCompression.
// The following fields are only valid if the base variant is neither Uncompressed nor UnknownCompression:
SpecificVariantCompressor string // A non-base variant compressor (or UnknownCompression if the true format is just the base variant)
SpecificVariantAnnotations map[string]string // Annotations required to benefit from the base variant.
}
// CandidateLocations2Options are used in CandidateLocations2.
type CandidateLocations2Options struct {
// If !CanSubstitute, the returned candidates will match the submitted digest exactly; if
@@ -51,9 +72,10 @@ type CandidateLocations2Options struct {
// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
type BICReplacementCandidate2 struct {
Digest digest.Digest
CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
UnknownLocation bool // is true when `Location` for this blob is not set
Location types.BICLocationReference // not set if UnknownLocation is set to `true`
Digest digest.Digest
CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
CompressionAnnotations map[string]string // If necessary, annotations necessary to use CompressionAlgorithm
UnknownLocation bool // is true when `Location` for this blob is not set
Location types.BICLocationReference // not set if UnknownLocation is set to `true`
}
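
Together these methods replace the old single compressor-name string with structured, locally verified data. A hypothetical usage sketch, assuming it lives in the same package as the interface (the helper and the compressor-name literals are illustrative):

    import digest "github.com/opencontainers/go-digest"

    // recordUploadedBlob records compression data for a blob this process just
    // produced and verified, per the WARNING above; never pass data merely
    // claimed by a remote manifest.
    func recordUploadedBlob(bic BlobInfoCache2, blobDigest digest.Digest, tocAnnotations map[string]string) {
        bic.RecordDigestCompressorData(blobDigest, DigestCompressorData{
            BaseVariantCompressor:      "zstd",         // base variant, reusable even by consumers that ignore the TOC
            SpecificVariantCompressor:  "zstd:chunked", // exact variant produced (UnknownCompression if just the base)
            SpecificVariantAnnotations: tocAnnotations, // annotations a consumer needs to benefit from the variant
        })
    }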


@@ -76,6 +76,9 @@ func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.Blob
Size: blob.Size,
CompressionOperation: blob.CompressionOperation,
CompressionAlgorithm: blob.CompressionAlgorithm,
// CompressionAnnotations could be set to blob.Annotations, but that may contain unrelated
// annotations, and we didn't use the blob.Annotations field previously, so we'll
// continue not using it.
}, nil
}


@@ -205,11 +205,6 @@ type ReuseConditions struct {
// (which can be nil to represent uncompressed or unknown) matches reuseConditions.
func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool {
if c.RequiredCompression != nil {
if c.RequiredCompression.Name() == compressiontypes.ZstdChunkedAlgorithmName {
// HACK: Never match when the caller asks for zstd:chunked, because we don't record the annotations required to use the chunked blobs.
// The caller must re-compress to build those annotations.
return false
}
if candidateCompression == nil ||
(c.RequiredCompression.Name() != candidateCompression.Name() && c.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
return false


@@ -134,9 +134,14 @@ type ReusedBlob struct {
Size int64 // Must be provided
// The following compression fields should be set when the reuse substitutes
// a differently-compressed blob.
// They may be set also to change from a base variant to a specific variant of an algorithm.
CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A
CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A
// Annotations that should be added, for CompressionAlgorithm. Note that they might need to be
// added even if the digest doesn't change (if we found the annotations in a cache).
CompressionAnnotations map[string]string
MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes.
}
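
A transport that substitutes a differently-compressed blob would now also return the annotations needed to consume it, e.g. for zstd:chunked (a sketch assuming the same package as ReusedBlob; values illustrative, and the annotations must come from locally verified data):

    import (
        "github.com/containers/image/v5/pkg/compression"
        "github.com/containers/image/v5/types"
        digest "github.com/opencontainers/go-digest"
    )

    // reusedZstdChunked fills ReusedBlob for a substituted zstd:chunked blob,
    // including the TOC annotations the consumer needs.
    func reusedZstdChunked(d digest.Digest, size int64, tocAnnotations map[string]string) ReusedBlob {
        return ReusedBlob{
            Digest:                 d,
            Size:                   size,
            CompressionOperation:   types.Compress,
            CompressionAlgorithm:   &compression.ZstdChunked,
            CompressionAnnotations: tocAnnotations,
        }
    }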


@@ -202,7 +202,7 @@ func (m *Schema2) ConfigInfo() types.BlobInfo {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *Schema2) LayerInfos() []LayerInfo {
blobs := []LayerInfo{}
blobs := make([]LayerInfo, 0, len(m.LayersDescriptors))
for _, layer := range m.LayersDescriptors {
blobs = append(blobs, LayerInfo{
BlobInfo: BlobInfoFromSchema2Descriptor(layer),


@@ -95,7 +95,7 @@ func (m *OCI1) ConfigInfo() types.BlobInfo {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *OCI1) LayerInfos() []LayerInfo {
blobs := []LayerInfo{}
blobs := make([]LayerInfo, 0, len(m.Layers))
for _, layer := range m.Layers {
blobs = append(blobs, LayerInfo{
BlobInfo: BlobInfoFromOCI1Descriptor(layer),


@@ -151,9 +151,9 @@ func openRepo(path string) (*C.struct_OstreeRepo, error) {
var cerr *C.GError
cpath := C.CString(path)
defer C.free(unsafe.Pointer(cpath))
pathc := C.g_file_new_for_path(cpath)
defer C.g_object_unref(C.gpointer(pathc))
repo := C.ostree_repo_new(pathc)
file := C.g_file_new_for_path(cpath)
defer C.g_object_unref(C.gpointer(file))
repo := C.ostree_repo_new(file)
r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr)))
if !r {
C.g_object_unref(C.gpointer(repo))


@@ -25,57 +25,133 @@ const replacementAttempts = 5
// This is a heuristic/guess, and could well use a different value.
const replacementUnknownLocationAttempts = 2
// CandidateCompression returns (true, compressionOp, compressionAlgo) if a blob
// with compressionName (which can be Uncompressed or UnknownCompression) is acceptable for a CandidateLocations* call with v2Options.
// CandidateTemplate is a subset of BICReplacementCandidate2 with data related to a specific digest,
// which can be later combined with information about a location.
type CandidateTemplate struct {
digest digest.Digest
compressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
compressionAlgorithm *compression.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
compressionAnnotations map[string]string // If necessary, annotations necessary to use compressionAlgorithm
}
// CandidateTemplateWithCompression returns a CandidateTemplate if a blob with data is acceptable
// for a CandidateLocations* call with v2Options.
//
// v2Options can be set to nil if the call is CandidateLocations (i.e. compression is not required to be known);
// if not nil, the call is assumed to be CandidateLocations2.
//
// The (compressionOp, compressionAlgo) values are suitable for BICReplacementCandidate2
func CandidateCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, compressorName string) (bool, types.LayerCompression, *compression.Algorithm) {
func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, data blobinfocache.DigestCompressorData) *CandidateTemplate {
if v2Options == nil {
return true, types.PreserveOriginal, nil // Anything goes. The (compressionOp, compressionAlgo) values are not used.
return &CandidateTemplate{ // Anything goes. The compressionOperation, compressionAlgorithm and compressionAnnotations values are not used.
digest: digest,
}
}
var op types.LayerCompression
var algo *compression.Algorithm
switch compressorName {
requiredCompression := "nil"
if v2Options.RequiredCompression != nil {
requiredCompression = v2Options.RequiredCompression.Name()
}
switch data.BaseVariantCompressor {
case blobinfocache.Uncompressed:
op = types.Decompress
algo = nil
if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
PossibleManifestFormats: v2Options.PossibleManifestFormats,
RequiredCompression: v2Options.RequiredCompression,
}, nil) {
logrus.Debugf("Ignoring BlobInfoCache record of digest %q, uncompressed format does not match required %s or MIME types %#v",
digest.String(), requiredCompression, v2Options.PossibleManifestFormats)
return nil
}
return &CandidateTemplate{
digest: digest,
compressionOperation: types.Decompress,
compressionAlgorithm: nil,
compressionAnnotations: nil,
}
case blobinfocache.UnknownCompression:
logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String())
return false, types.PreserveOriginal, nil // Not allowed with CandidateLocations2
return nil // Not allowed with CandidateLocations2
default:
op = types.Compress
algo_, err := compression.AlgorithmByName(compressorName)
// See if we can use the specific variant, first.
if data.SpecificVariantCompressor != blobinfocache.UnknownCompression {
algo, err := compression.AlgorithmByName(data.SpecificVariantCompressor)
if err != nil {
logrus.Debugf("Not considering unrecognized specific compression variant %q for BlobInfoCache record of digest %q: %v",
data.SpecificVariantCompressor, digest.String(), err)
} else {
if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
PossibleManifestFormats: v2Options.PossibleManifestFormats,
RequiredCompression: v2Options.RequiredCompression,
}, &algo) {
logrus.Debugf("Ignoring specific compression variant %q for BlobInfoCache record of digest %q, it does not match required %s or MIME types %#v",
data.SpecificVariantCompressor, digest.String(), requiredCompression, v2Options.PossibleManifestFormats)
} else {
return &CandidateTemplate{
digest: digest,
compressionOperation: types.Compress,
compressionAlgorithm: &algo,
compressionAnnotations: data.SpecificVariantAnnotations,
}
}
}
}
// Try the base variant.
algo, err := compression.AlgorithmByName(data.BaseVariantCompressor)
if err != nil {
logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v",
digest.String(), compressorName, err)
return false, types.PreserveOriginal, nil // The BICReplacementCandidate2.CompressionAlgorithm field is required
digest.String(), data.BaseVariantCompressor, err)
return nil // The BICReplacementCandidate2.CompressionAlgorithm field is required
}
algo = &algo_
}
if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
PossibleManifestFormats: v2Options.PossibleManifestFormats,
RequiredCompression: v2Options.RequiredCompression,
}, algo) {
requiredCompresssion := "nil"
if v2Options.RequiredCompression != nil {
requiredCompresssion = v2Options.RequiredCompression.Name()
if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
PossibleManifestFormats: v2Options.PossibleManifestFormats,
RequiredCompression: v2Options.RequiredCompression,
}, &algo) {
logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v",
digest.String(), data.BaseVariantCompressor, requiredCompression, v2Options.PossibleManifestFormats)
return nil
}
return &CandidateTemplate{
digest: digest,
compressionOperation: types.Compress,
compressionAlgorithm: &algo,
compressionAnnotations: nil,
}
logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v",
digest.String(), compressorName, requiredCompresssion, v2Options.PossibleManifestFormats)
return false, types.PreserveOriginal, nil
}
return true, op, algo
}
// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
type CandidateWithTime struct {
Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
LastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation)
candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
lastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation)
}
// CandidateWithLocation returns a complete CandidateWithTime combining (template from CandidateTemplateWithCompression, location, lastSeen)
func (template CandidateTemplate) CandidateWithLocation(location types.BICLocationReference, lastSeen time.Time) CandidateWithTime {
return CandidateWithTime{
candidate: blobinfocache.BICReplacementCandidate2{
Digest: template.digest,
CompressionOperation: template.compressionOperation,
CompressionAlgorithm: template.compressionAlgorithm,
CompressionAnnotations: template.compressionAnnotations,
UnknownLocation: false,
Location: location,
},
lastSeen: lastSeen,
}
}
// CandidateWithUnknownLocation returns a complete CandidateWithTime for a template from CandidateTemplateWithCompression and an unknown location.
func (template CandidateTemplate) CandidateWithUnknownLocation() CandidateWithTime {
return CandidateWithTime{
candidate: blobinfocache.BICReplacementCandidate2{
Digest: template.digest,
CompressionOperation: template.compressionOperation,
CompressionAlgorithm: template.compressionAlgorithm,
CompressionAnnotations: template.compressionAnnotations,
UnknownLocation: true,
Location: types.BICLocationReference{Opaque: ""},
},
lastSeen: time.Time{},
}
}
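A minimal fragment showing how a cache backend is expected to combine these helpers (the digest, options, and location values are hypothetical; compare the memory and SQLite callers later in this diff):

compressionData := blobinfocache.DigestCompressorData{
	BaseVariantCompressor:      "zstd",
	SpecificVariantCompressor:  "zstd:chunked",
	SpecificVariantAnnotations: tocAnnotations, // hypothetical, recorded from locally verified data
}
template := prioritize.CandidateTemplateWithCompression(v2Options, blobDigest, compressionData)
if template == nil {
	return candidates // the blob cannot satisfy RequiredCompression or the possible manifest formats
}
candidates = append(candidates, template.CandidateWithLocation(location, time.Now()))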
// candidateSortState is a closure for a comparison used by slices.SortFunc on candidates to prioritize,
@@ -91,35 +167,35 @@ func (css *candidateSortState) compare(xi, xj CandidateWithTime) int {
// Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)
// First, deal with the primaryDigest/uncompressedDigest cases:
if xi.Candidate.Digest != xj.Candidate.Digest {
if xi.candidate.Digest != xj.candidate.Digest {
// - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
if xi.Candidate.Digest == css.primaryDigest {
if xi.candidate.Digest == css.primaryDigest {
return -1
}
if xj.Candidate.Digest == css.primaryDigest {
if xj.candidate.Digest == css.primaryDigest {
return 1
}
if css.uncompressedDigest != "" {
if xi.Candidate.Digest == css.uncompressedDigest {
if xi.candidate.Digest == css.uncompressedDigest {
return 1
}
if xj.Candidate.Digest == css.uncompressedDigest {
if xj.candidate.Digest == css.uncompressedDigest {
return -1
}
}
} else { // xi.Candidate.Digest == xj.Candidate.Digest
// The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) {
return -xi.LastSeen.Compare(xj.LastSeen)
if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) {
return -xi.lastSeen.Compare(xj.lastSeen)
}
}
// Neither of the digests are primaryDigest/uncompressedDigest:
if cmp := xi.LastSeen.Compare(xj.LastSeen); cmp != 0 { // Order primarily by time
if cmp := xi.lastSeen.Compare(xj.lastSeen); cmp != 0 { // Order primarily by time
return -cmp
}
// Fall back to digest, if timestamps end up _exactly_ the same (how?!)
return cmp.Compare(xi.Candidate.Digest, xj.Candidate.Digest)
return cmp.Compare(xi.candidate.Digest, xj.candidate.Digest)
}
// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the
@@ -138,7 +214,7 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
uncompressedDigest: uncompressedDigest,
}).compare)
for _, candidate := range cs {
if candidate.Candidate.UnknownLocation {
if candidate.candidate.UnknownLocation {
unknownLocationCandidates = append(unknownLocationCandidates, candidate)
} else {
knownLocationCandidates = append(knownLocationCandidates, candidate)
@@ -150,11 +226,11 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
unknownLocationCandidatesUsed := min(noLocationLimit, remainingCapacity, len(unknownLocationCandidates))
res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed)
for i := 0; i < knownLocationCandidatesUsed; i++ {
res[i] = knownLocationCandidates[i].Candidate
res[i] = knownLocationCandidates[i].candidate
}
// If candidates with unknown location are found, let's add them to the final list
for i := 0; i < unknownLocationCandidatesUsed; i++ {
res = append(res, unknownLocationCandidates[i].Candidate)
res = append(res, unknownLocationCandidates[i].candidate)
}
return res
}

View File

@@ -24,10 +24,11 @@ type locationKey struct {
type cache struct {
mutex sync.Mutex
// The following fields can only be accessed with mutex held.
uncompressedDigests map[digest.Digest]digest.Digest
digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression), for each digest
uncompressedDigests map[digest.Digest]digest.Digest
uncompressedDigestsByTOC map[digest.Digest]digest.Digest
digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
compressors map[digest.Digest]blobinfocache.DigestCompressorData // stores compression data for each digest; BaseVariantCompressor != UnknownCompression
}
// New returns a BlobInfoCache implementation which is in-memory only.
@@ -44,10 +45,11 @@ func New() types.BlobInfoCache {
func new2() *cache {
return &cache{
uncompressedDigests: map[digest.Digest]digest.Digest{},
digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{},
knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
compressors: map[digest.Digest]string{},
uncompressedDigests: map[digest.Digest]digest.Digest{},
uncompressedDigestsByTOC: map[digest.Digest]digest.Digest{},
digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{},
knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
compressors: map[digest.Digest]blobinfocache.DigestCompressorData{},
}
}
@@ -104,6 +106,30 @@ func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre
anyDigestSet.Add(anyDigest)
}
// UncompressedDigestForTOC returns an uncompressed digest corresponding to tocDigest.
// Returns "" if the uncompressed digest is unknown.
func (mem *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
mem.mutex.Lock()
defer mem.mutex.Unlock()
if d, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok {
return d
}
return ""
}
// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don't record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (mem *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
mem.mutex.Lock()
defer mem.mutex.Unlock()
if previous, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok && previous != uncompressed {
logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed)
}
mem.uncompressedDigestsByTOC[tocDigest] = uncompressed
}
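A sketch of the intended round trip, assuming a *cache value as in this file and hypothetical, locally verified digests (not part of the diff):

// After locally verifying that the blob with tocDigest decompresses to uncompressedDigest:
mem.RecordTOCUncompressedPair(tocDigest, uncompressedDigest)
// Later lookups translate the TOC digest back; "" means the pairing is unknown.
if d := mem.UncompressedDigestForTOC(tocDigest); d != "" {
	fmt.Println("uncompressed digest:", d)
}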
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
@@ -118,19 +144,40 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type
locationScope[location] = time.Now() // Possibly overwriting an older entry.
}
// RecordDigestCompressorName records that the blob with the specified digest is either compressed with the specified
// algorithm, or uncompressed, or that we no longer know.
func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) {
// RecordDigestCompressorData records data for the blob with the specified digest.
// WARNING: Only call this with LOCALLY VERIFIED data:
// - don't record a compressor for a digest just because some remote author claims so
// (e.g. because a manifest says so);
// - don't record the non-base variant or annotations if we are not _sure_ that the base variant
// and the blob's digest match the non-base variant's annotations (e.g. because we saw them
// in a manifest)
//
// otherwise the cache could be poisoned and cause us to make incorrect edits to type
// information in a manifest.
func (mem *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) {
mem.mutex.Lock()
defer mem.mutex.Unlock()
if previous, ok := mem.compressors[blobDigest]; ok && previous != compressorName {
logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", blobDigest, previous, compressorName)
if previous, ok := mem.compressors[anyDigest]; ok {
if previous.BaseVariantCompressor != data.BaseVariantCompressor {
logrus.Warnf("Base compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.BaseVariantCompressor, data.BaseVariantCompressor)
} else if previous.SpecificVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor != blobinfocache.UnknownCompression &&
previous.SpecificVariantCompressor != data.SpecificVariantCompressor {
logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.SpecificVariantCompressor, data.SpecificVariantCompressor)
}
// We don't check SpecificVariantAnnotations for equality; it's possible that their generation is not deterministic.
// Preserve specific variant information if the incoming data does not have it.
if data.BaseVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor == blobinfocache.UnknownCompression &&
previous.SpecificVariantCompressor != blobinfocache.UnknownCompression {
data.SpecificVariantCompressor = previous.SpecificVariantCompressor
data.SpecificVariantAnnotations = previous.SpecificVariantAnnotations
}
}
if compressorName == blobinfocache.UnknownCompression {
delete(mem.compressors, blobDigest)
if data.BaseVariantCompressor == blobinfocache.UnknownCompression {
delete(mem.compressors, anyDigest)
return
}
mem.compressors[blobDigest] = compressorName
mem.compressors[anyDigest] = data
}
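To illustrate the merge rule: with a hypothetical digest d, a later record that only knows the base variant does not erase previously stored specific-variant data (a fragment assuming this package's context):

mem.RecordDigestCompressorData(d, blobinfocache.DigestCompressorData{
	BaseVariantCompressor:      "zstd",
	SpecificVariantCompressor:  "zstd:chunked",
	SpecificVariantAnnotations: tocAnnotations, // hypothetical, locally verified
})
mem.RecordDigestCompressorData(d, blobinfocache.DigestCompressorData{
	BaseVariantCompressor:     "zstd",
	SpecificVariantCompressor: blobinfocache.UnknownCompression,
})
// mem.compressors[d] still carries "zstd:chunked" and its annotations.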
// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory
@@ -140,38 +187,25 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso
// with unknown compression.
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime {
compressorName := blobinfocache.UnknownCompression
if v, ok := mem.compressors[digest]; ok {
compressorName = v
compressionData := blobinfocache.DigestCompressorData{
BaseVariantCompressor: blobinfocache.UnknownCompression,
SpecificVariantCompressor: blobinfocache.UnknownCompression,
SpecificVariantAnnotations: nil,
}
ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName)
if !ok {
if v, ok := mem.compressors[digest]; ok {
compressionData = v
}
template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData)
if template == nil {
return candidates
}
locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
if len(locations) > 0 {
for l, t := range locations {
candidates = append(candidates, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressionOperation: compressionOp,
CompressionAlgorithm: compressionAlgo,
Location: l,
},
LastSeen: t,
})
candidates = append(candidates, template.CandidateWithLocation(l, t))
}
} else if v2Options != nil {
candidates = append(candidates, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressionOperation: compressionOp,
CompressionAlgorithm: compressionAlgo,
UnknownLocation: true,
Location: types.BICLocationReference{Opaque: ""},
},
LastSeen: time.Time{},
})
candidates = append(candidates, template.CandidateWithUnknownLocation())
}
return candidates
}

View File

@@ -34,6 +34,19 @@ func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
}
// UncompressedDigestForTOC returns an uncompressed digest corresponding to tocDigest.
// Returns "" if the uncompressed digest is unknown.
func (noCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
return ""
}
// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don't record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (noCache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
}
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {

View File

@@ -3,6 +3,7 @@ package sqlite
import (
"database/sql"
"encoding/json"
"errors"
"fmt"
"sync"
@@ -295,6 +296,24 @@ func ensureDBHasCurrentSchema(db *sql.DB) error {
`PRIMARY KEY (transport, scope, digest, location)
)`,
},
{
"DigestTOCUncompressedPairs",
`CREATE TABLE IF NOT EXISTS DigestTOCUncompressedPairs(` +
// index implied by PRIMARY KEY
`tocDigest TEXT PRIMARY KEY NOT NULL,` +
`uncompressedDigest TEXT NOT NULL
)`,
},
{
"DigestSpecificVariantCompressors", // If changing the schema incompatibly, merge this with DigestCompressors.
`CREATE TABLE IF NOT EXISTS DigestSpecificVariantCompressors(` +
// index implied by PRIMARY KEY
`digest TEXT PRIMARY KEY NOT NULL,` +
// The compressor is not `UnknownCompression`.
`specificVariantCompressor TEXT NOT NULL,
specificVariantAnnotations BLOB NOT NULL
)`,
},
}
_, err := dbTransaction(db, func(tx *sql.Tx) (void, error) {
@@ -385,6 +404,57 @@ func (sqc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}
// UncompressedDigestForTOC returns an uncompressed digest corresponding to tocDigest.
// Returns "" if the uncompressed digest is unknown.
func (sqc *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) {
uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?", tocDigest.String())
if err != nil {
return "", err
}
if found {
d, err := digest.Parse(uncompressedString)
if err != nil {
return "", err
}
return d, nil
}
return "", nil
})
if err != nil {
return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
}
return res
}
// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don't record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (sqc *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?", tocDigest.String())
if err != nil {
return void{}, fmt.Errorf("looking for uncompressed digest for blob with TOC %q", tocDigest)
}
if gotPrevious {
previous, err := digest.Parse(previousString)
if err != nil {
return void{}, err
}
if previous != uncompressed {
logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed)
}
}
if _, err := tx.Exec("INSERT OR REPLACE INTO DigestTOCUncompressedPairs(tocDigest, uncompressedDigest) VALUES (?, ?)",
tocDigest.String(), uncompressed.String()); err != nil {
return void{}, fmt.Errorf("recording uncompressed digest %q for blob with TOC %q: %w", uncompressed, tocDigest, err)
}
return void{}, nil
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, location types.BICLocationReference) {
@@ -398,29 +468,58 @@ func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope type
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}
// RecordDigestCompressorName records a compressor for the blob with the specified digest,
// or Uncompressed or UnknownCompression.
// WARNING: Only call this with LOCALLY VERIFIED data; dont record a compressor for a
// digest just because some remote author claims so (e.g. because a manifest says so);
// RecordDigestCompressorData records data for the blob with the specified digest.
// WARNING: Only call this with LOCALLY VERIFIED data:
// - don't record a compressor for a digest just because some remote author claims so
// (e.g. because a manifest says so);
// - don't record the non-base variant or annotations if we are not _sure_ that the base variant
// and the blob's digest match the non-base variant's annotations (e.g. because we saw them
// in a manifest)
//
// otherwise the cache could be poisoned and cause us to make incorrect edits to type
// information in a manifest.
func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
func (sqc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) {
_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String())
if err != nil {
return void{}, fmt.Errorf("looking for compressor of for %q", anyDigest)
return void{}, fmt.Errorf("looking for compressor of %q", anyDigest)
}
if gotPrevious && previous != compressorName {
logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, compressorName)
warned := false
if gotPrevious && previous != data.BaseVariantCompressor {
logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, data.BaseVariantCompressor)
warned = true
}
if compressorName == blobinfocache.UnknownCompression {
if data.BaseVariantCompressor == blobinfocache.UnknownCompression {
if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil {
return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err)
}
if _, err := tx.Exec("DELETE FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String()); err != nil {
return void{}, fmt.Errorf("deleting specific variant compressor for digest %q: %w", anyDigest, err)
}
} else {
if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)",
anyDigest.String(), compressorName); err != nil {
return void{}, fmt.Errorf("recording compressor %q for %q: %w", compressorName, anyDigest, err)
anyDigest.String(), data.BaseVariantCompressor); err != nil {
return void{}, fmt.Errorf("recording compressor %q for %q: %w", data.BaseVariantCompressor, anyDigest, err)
}
}
if data.SpecificVariantCompressor != blobinfocache.UnknownCompression {
if !warned { // Don't warn twice about the same digest
prevSVC, found, err := querySingleValue[string](tx, "SELECT specificVariantCompressor FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String())
if err != nil {
return void{}, fmt.Errorf("looking for specific variant compressor of %q", anyDigest)
}
if found && data.SpecificVariantCompressor != prevSVC {
logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, prevSVC, data.SpecificVariantCompressor)
}
}
annotations, err := json.Marshal(data.SpecificVariantAnnotations)
if err != nil {
return void{}, err
}
if _, err := tx.Exec("INSERT OR REPLACE INTO DigestSpecificVariantCompressors(digest, specificVariantCompressor, specificVariantAnnotations) VALUES (?, ?, ?)",
anyDigest.String(), data.SpecificVariantCompressor, annotations); err != nil {
return void{}, fmt.Errorf("recording specific variant compressor %q/%q for %q: %w", data.SpecificVariantCompressor, annotations, anyDigest, err)
}
}
return void{}, nil
@@ -433,18 +532,33 @@ func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressor
// with unknown compression.
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
v2Options *blobinfocache.CandidateLocations2Options) ([]prioritize.CandidateWithTime, error) {
compressorName := blobinfocache.UnknownCompression
compressionData := blobinfocache.DigestCompressorData{
BaseVariantCompressor: blobinfocache.UnknownCompression,
SpecificVariantCompressor: blobinfocache.UnknownCompression,
SpecificVariantAnnotations: nil,
}
if v2Options != nil {
compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String())
if err != nil {
return nil, fmt.Errorf("scanning compressorName: %w", err)
}
if found {
compressorName = compressor
var baseVariantCompressor string
var specificVariantCompressor sql.NullString
var annotationBytes []byte
switch err := tx.QueryRow("SELECT compressor, specificVariantCompressor, specificVariantAnnotations "+
"FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest) WHERE digest = ?", digest.String()).
Scan(&baseVariantCompressor, &specificVariantCompressor, &annotationBytes); {
case errors.Is(err, sql.ErrNoRows): // Do nothing
case err != nil:
return nil, fmt.Errorf("scanning compressor data: %w", err)
default:
compressionData.BaseVariantCompressor = baseVariantCompressor
if specificVariantCompressor.Valid && annotationBytes != nil {
compressionData.SpecificVariantCompressor = specificVariantCompressor.String
if err := json.Unmarshal(annotationBytes, &compressionData.SpecificVariantAnnotations); err != nil {
return nil, err
}
}
}
}
ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName)
if !ok {
template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData)
if template == nil {
return candidates, nil
}
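For readers less familiar with database/sql, a standalone fragment of the nullable-column pattern the query above relies on (table and column names are hypothetical): the LEFT JOIN yields NULL for the specific-variant columns when only the base row exists, which Scan can only represent via sql.NullString (or a nil []byte):

var base string
var specific sql.NullString
var annotations []byte
err := tx.QueryRow("SELECT base, specific, annotations FROM A LEFT JOIN B USING (id) WHERE id = ?", id).
	Scan(&base, &specific, &annotations)
switch {
case errors.Is(err, sql.ErrNoRows):
	// no base row at all: leave everything unknown
case err != nil:
	// a real query failure: propagate err
default:
	if specific.Valid && annotations != nil {
		_ = specific.String // the joined B row existed; both values are safe to read
	}
}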
@@ -463,15 +577,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
if err := rows.Scan(&location, &time); err != nil {
return nil, fmt.Errorf("scanning candidate: %w", err)
}
candidates = append(candidates, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressionOperation: compressionOp,
CompressionAlgorithm: compressionAlgo,
Location: types.BICLocationReference{Opaque: location},
},
LastSeen: time,
})
candidates = append(candidates, template.CandidateWithLocation(types.BICLocationReference{Opaque: location}, time))
rowAdded = true
}
if err := rows.Err(); err != nil {
@@ -479,16 +585,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
}
if !rowAdded && v2Options != nil {
candidates = append(candidates, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressionOperation: compressionOp,
CompressionAlgorithm: compressionAlgo,
UnknownLocation: true,
Location: types.BICLocationReference{Opaque: ""},
},
LastSeen: time.Time{},
})
candidates = append(candidates, template.CandidateWithUnknownLocation())
}
return candidates, nil
}
@@ -516,40 +613,41 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
if err != nil {
return nil, err
}
// FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries.
// (In the extreme, we could turn _everything_ this function does into a single query.
// And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.)
// For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations.
rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String())
if err != nil {
return nil, fmt.Errorf("querying for other digests: %w", err)
}
defer rows.Close()
for rows.Next() {
var otherDigestString string
if err := rows.Scan(&otherDigestString); err != nil {
return nil, fmt.Errorf("scanning other digest: %w", err)
}
otherDigest, err := digest.Parse(otherDigestString)
if uncompressedDigest != "" {
// FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries.
// (In the extreme, we could turn _everything_ this function does into a single query.
// And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.)
// For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations.
rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String())
if err != nil {
return nil, err
return nil, fmt.Errorf("querying for other digests: %w", err)
}
if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options)
defer rows.Close()
for rows.Next() {
var otherDigestString string
if err := rows.Scan(&otherDigestString); err != nil {
return nil, fmt.Errorf("scanning other digest: %w", err)
}
otherDigest, err := digest.Parse(otherDigestString)
if err != nil {
return nil, err
}
if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options)
if err != nil {
return nil, err
}
}
}
if err := rows.Err(); err != nil {
return nil, fmt.Errorf("iterating through other digests: %w", err)
}
}
if err := rows.Err(); err != nil {
return nil, fmt.Errorf("iterating through other digests: %w", err)
}
if uncompressedDigest != primaryDigest {
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options)
if err != nil {
return nil, err
if uncompressedDigest != primaryDigest {
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options)
if err != nil {
return nil, err
}
}
}
}

View File

@@ -195,10 +195,10 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time,
return untrustedCertificate.PublicKey, nil
}
func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string,
untrustedPayloadBytes []byte) (crypto.PublicKey, error) {
rekorSETTime, err := internal.VerifyRekorSET(rekorPublicKey, untrustedRekorSET, untrustedCertificateBytes,
rekorSETTime, err := internal.VerifyRekorSET(rekorPublicKeys, untrustedRekorSET, untrustedCertificateBytes,
untrustedBase64Signature, untrustedPayloadBytes)
if err != nil {
return nil, err

View File

@@ -113,7 +113,7 @@ func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) {
// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data.
// Returns bundle upload time on success.
func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
// FIXME: Should the publicKey parameter hard-code ecdsa?
// == Parse SET bytes
@@ -130,7 +130,14 @@ func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unver
return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("canonicalizing Rekor SET JSON: %v", err))
}
untrustedSETPayloadHash := sha256.Sum256(untrustedSETPayloadCanonicalBytes)
if !ecdsa.VerifyASN1(publicKey, untrustedSETPayloadHash[:], untrustedSET.UntrustedSignedEntryTimestamp) {
publicKeymatched := false
for _, pk := range publicKeys {
if ecdsa.VerifyASN1(pk, untrustedSETPayloadHash[:], untrustedSET.UntrustedSignedEntryTimestamp) {
publicKeymatched = true
break
}
}
if !publicKeymatched {
return time.Time{}, NewInvalidSignatureError("cryptographic signature verification of Rekor SET failed")
}

View File

@@ -7,6 +7,7 @@ import (
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/containers/image/v5/version"
@@ -171,24 +172,62 @@ type SigstorePayloadAcceptanceRules struct {
ValidateSignedDockerManifestDigest func(digest.Digest) error
}
// VerifySigstorePayload verifies unverifiedBase64Signature of unverifiedPayload was correctly created by publicKey, and that its principal components
// verifySigstorePayloadBlobSignature verifies unverifiedSignature of unverifiedPayload was correctly created
// by any of the public keys in publicKeys.
//
// This is an internal implementation detail of VerifySigstorePayload and should have no other callers.
// It is INSUFFICIENT alone to consider the signature acceptable.
func verifySigstorePayloadBlobSignature(publicKeys []crypto.PublicKey, unverifiedPayload, unverifiedSignature []byte) error {
if len(publicKeys) == 0 {
return errors.New("Need at least one public key to verify the sigstore payload, but got 0")
}
verifiers := make([]sigstoreSignature.Verifier, 0, len(publicKeys))
for _, key := range publicKeys {
// Failing to load a verifier indicates that something is really, really
// invalid about the public key; prefer to fail even if the signature might be
// valid with other keys, so that users fix their fallback keys before they need them.
// For that reason, we even initialize all verifiers before trying to validate the signature
// with any key.
verifier, err := sigstoreSignature.LoadVerifier(key, sigstoreHarcodedHashAlgorithm)
if err != nil {
return err
}
verifiers = append(verifiers, verifier)
}
var failures []string
for _, verifier := range verifiers {
// github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(),
// which seems to be not used by anything. So we don't bother.
err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload))
if err == nil {
return nil
}
failures = append(failures, err.Error())
}
if len(failures) == 0 {
// Coverage: We have checked there is at least one public key, any success causes an early return,
// and any failure adds an entry to failures => there must be at least one error.
return fmt.Errorf("Internal error: signature verification failed but no errors have been recorded")
}
return NewInvalidSignatureError("cryptographic signature verification failed: " + strings.Join(failures, ", "))
}
// VerifySigstorePayload verifies unverifiedBase64Signature of unverifiedPayload was correctly created by any of the public keys in publicKeys, and that its principal components
// match expected values, both as specified by rules, and returns it.
// We return an *UntrustedSigstorePayload, although nothing actually uses it,
// just to double-check against stupid typos.
func VerifySigstorePayload(publicKey crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) {
verifier, err := sigstoreSignature.LoadVerifier(publicKey, sigstoreHarcodedHashAlgorithm)
if err != nil {
return nil, fmt.Errorf("creating verifier: %w", err)
}
func VerifySigstorePayload(publicKeys []crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) {
unverifiedSignature, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature)
if err != nil {
return nil, NewInvalidSignatureError(fmt.Sprintf("base64 decoding: %v", err))
}
// github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(),
// which seems to be not used by anything. So we don't bother.
if err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload)); err != nil {
return nil, NewInvalidSignatureError(fmt.Sprintf("cryptographic signature verification failed: %v", err))
if err := verifySigstorePayloadBlobSignature(publicKeys, unverifiedPayload, unverifiedSignature); err != nil {
return nil, err
}
var unmatchedPayload UntrustedSigstorePayload
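A fragment sketching a multi-key call (in-package; the keys, payload, and acceptance rules are hypothetical placeholders, and real rules must actually validate their inputs):

payload, err := VerifySigstorePayload(
	[]crypto.PublicKey{primaryKey, rotationKey}, // verification succeeds if any key matches
	unverifiedPayload, unverifiedBase64Signature,
	SigstorePayloadAcceptanceRules{
		ValidateSignedDockerReference:      func(ref string) error { return nil },      // placeholder only
		ValidateSignedDockerManifestDigest: func(d digest.Digest) error { return nil }, // placeholder only
	},
)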

View File

@@ -2,7 +2,6 @@ package signature
import (
"encoding/json"
"errors"
"fmt"
"github.com/containers/image/v5/signature/internal"
@@ -15,29 +14,57 @@ type PRSigstoreSignedOption func(*prSigstoreSigned) error
func PRSigstoreSignedWithKeyPath(keyPath string) PRSigstoreSignedOption {
return func(pr *prSigstoreSigned) error {
if pr.KeyPath != "" {
return errors.New(`"keyPath" already specified`)
return InvalidPolicyFormatError(`"keyPath" already specified`)
}
pr.KeyPath = keyPath
return nil
}
}
// PRSigstoreSignedWithKeyPaths specifies a value for the "keyPaths" field when calling NewPRSigstoreSigned.
func PRSigstoreSignedWithKeyPaths(keyPaths []string) PRSigstoreSignedOption {
return func(pr *prSigstoreSigned) error {
if pr.KeyPaths != nil {
return InvalidPolicyFormatError(`"keyPaths" already specified`)
}
if len(keyPaths) == 0 {
return InvalidPolicyFormatError(`"keyPaths" contains no entries`)
}
pr.KeyPaths = keyPaths
return nil
}
}
// PRSigstoreSignedWithKeyData specifies a value for the "keyData" field when calling NewPRSigstoreSigned.
func PRSigstoreSignedWithKeyData(keyData []byte) PRSigstoreSignedOption {
return func(pr *prSigstoreSigned) error {
if pr.KeyData != nil {
return errors.New(`"keyData" already specified`)
return InvalidPolicyFormatError(`"keyData" already specified`)
}
pr.KeyData = keyData
return nil
}
}
// PRSigstoreSignedWithKeyDatas specifies a value for the "keyDatas" field when calling NewPRSigstoreSigned.
func PRSigstoreSignedWithKeyDatas(keyDatas [][]byte) PRSigstoreSignedOption {
return func(pr *prSigstoreSigned) error {
if pr.KeyDatas != nil {
return InvalidPolicyFormatError(`"keyDatas" already specified`)
}
if len(keyDatas) == 0 {
return InvalidPolicyFormatError(`"keyDatas" contains no entries`)
}
pr.KeyDatas = keyDatas
return nil
}
}
// PRSigstoreSignedWithFulcio specifies a value for the "fulcio" field when calling NewPRSigstoreSigned.
func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedOption {
return func(pr *prSigstoreSigned) error {
if pr.Fulcio != nil {
return errors.New(`"fulcio" already specified`)
return InvalidPolicyFormatError(`"fulcio" already specified`)
}
pr.Fulcio = fulcio
return nil
@@ -48,29 +75,57 @@ func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedO
func PRSigstoreSignedWithRekorPublicKeyPath(rekorPublicKeyPath string) PRSigstoreSignedOption {
return func(pr *prSigstoreSigned) error {
if pr.RekorPublicKeyPath != "" {
return errors.New(`"rekorPublicKeyPath" already specified`)
return InvalidPolicyFormatError(`"rekorPublicKeyPath" already specified`)
}
pr.RekorPublicKeyPath = rekorPublicKeyPath
return nil
}
}
// PRSigstoreSignedWithRekorPublicKeyPaths specifies a value for the "rekorPublicKeyPaths" field when calling NewPRSigstoreSigned.
func PRSigstoreSignedWithRekorPublicKeyPaths(rekorPublickeyPaths []string) PRSigstoreSignedOption {
return func(pr *prSigstoreSigned) error {
if pr.RekorPublicKeyPaths != nil {
return InvalidPolicyFormatError(`"rekorPublickeyPaths" already specified`)
}
if len(rekorPublickeyPaths) == 0 {
return InvalidPolicyFormatError(`"rekorPublickeyPaths" contains no entries`)
}
pr.RekorPublicKeyPaths = rekorPublickeyPaths
return nil
}
}
// PRSigstoreSignedWithRekorPublicKeyData specifies a value for the "rekorPublicKeyData" field when calling NewPRSigstoreSigned.
func PRSigstoreSignedWithRekorPublicKeyData(rekorPublicKeyData []byte) PRSigstoreSignedOption {
return func(pr *prSigstoreSigned) error {
if pr.RekorPublicKeyData != nil {
return errors.New(`"rekorPublicKeyData" already specified`)
return InvalidPolicyFormatError(`"rekorPublicKeyData" already specified`)
}
pr.RekorPublicKeyData = rekorPublicKeyData
return nil
}
}
// PRSigstoreSignedWithRekorPublicKeyDatas specifies a value for the "rekorPublicKeyDatas" field when calling NewPRSigstoreSigned.
func PRSigstoreSignedWithRekorPublicKeyDatas(rekorPublickeyDatas [][]byte) PRSigstoreSignedOption {
return func(pr *prSigstoreSigned) error {
if pr.RekorPublicKeyDatas != nil {
return InvalidPolicyFormatError(`"rekorPublickeyDatas" already specified`)
}
if len(rekorPublickeyDatas) == 0 {
return InvalidPolicyFormatError(`"rekorPublickeyDatas" contains no entries`)
}
pr.RekorPublicKeyDatas = rekorPublickeyDatas
return nil
}
}
// PRSigstoreSignedWithSignedIdentity specifies a value for the "signedIdentity" field when calling NewPRSigstoreSigned.
func PRSigstoreSignedWithSignedIdentity(signedIdentity PolicyReferenceMatch) PRSigstoreSignedOption {
return func(pr *prSigstoreSigned) error {
if pr.SignedIdentity != nil {
return errors.New(`"signedIdentity" already specified`)
return InvalidPolicyFormatError(`"signedIdentity" already specified`)
}
pr.SignedIdentity = signedIdentity
return nil
@@ -92,21 +147,40 @@ func newPRSigstoreSigned(options ...PRSigstoreSignedOption) (*prSigstoreSigned,
if res.KeyPath != "" {
keySources++
}
if res.KeyPaths != nil {
keySources++
}
if res.KeyData != nil {
keySources++
}
if res.KeyDatas != nil {
keySources++
}
if res.Fulcio != nil {
keySources++
}
if keySources != 1 {
return nil, InvalidPolicyFormatError("exactly one of keyPath, keyData and fulcio must be specified")
return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths, keyData, keyDatas and fulcio must be specified")
}
if res.RekorPublicKeyPath != "" && res.RekorPublicKeyData != nil {
return nil, InvalidPolicyFormatError("rekorPublickeyType and rekorPublickeyData cannot be used simultaneously")
rekorSources := 0
if res.RekorPublicKeyPath != "" {
rekorSources++
}
if res.Fulcio != nil && res.RekorPublicKeyPath == "" && res.RekorPublicKeyData == nil {
return nil, InvalidPolicyFormatError("At least one of RekorPublickeyPath and RekorPublickeyData must be specified if fulcio is used")
if res.RekorPublicKeyPaths != nil {
rekorSources++
}
if res.RekorPublicKeyData != nil {
rekorSources++
}
if res.RekorPublicKeyDatas != nil {
rekorSources++
}
if rekorSources > 1 {
return nil, InvalidPolicyFormatError("at most one of rekorPublickeyPath, rekorPublicKeyPaths, rekorPublickeyData and rekorPublicKeyDatas can be used simultaneously")
}
if res.Fulcio != nil && rekorSources == 0 {
return nil, InvalidPolicyFormatError("At least one of rekorPublickeyPath, rekorPublicKeyPaths, rekorPublickeyData and rekorPublicKeyDatas must be specified if fulcio is used")
}
if res.SignedIdentity == nil {
@@ -144,7 +218,8 @@ var _ json.Unmarshaler = (*prSigstoreSigned)(nil)
func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
*pr = prSigstoreSigned{}
var tmp prSigstoreSigned
var gotKeyPath, gotKeyData, gotFulcio, gotRekorPublicKeyPath, gotRekorPublicKeyData bool
var gotKeyPath, gotKeyPaths, gotKeyData, gotKeyDatas, gotFulcio bool
var gotRekorPublicKeyPath, gotRekorPublicKeyPaths, gotRekorPublicKeyData, gotRekorPublicKeyDatas bool
var fulcio prSigstoreSignedFulcio
var signedIdentity json.RawMessage
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
@@ -154,18 +229,30 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
case "keyPath":
gotKeyPath = true
return &tmp.KeyPath
case "keyPaths":
gotKeyPaths = true
return &tmp.KeyPaths
case "keyData":
gotKeyData = true
return &tmp.KeyData
case "keyDatas":
gotKeyDatas = true
return &tmp.KeyDatas
case "fulcio":
gotFulcio = true
return &fulcio
case "rekorPublicKeyPath":
gotRekorPublicKeyPath = true
return &tmp.RekorPublicKeyPath
case "rekorPublicKeyPaths":
gotRekorPublicKeyPaths = true
return &tmp.RekorPublicKeyPaths
case "rekorPublicKeyData":
gotRekorPublicKeyData = true
return &tmp.RekorPublicKeyData
case "rekorPublicKeyDatas":
gotRekorPublicKeyDatas = true
return &tmp.RekorPublicKeyDatas
case "signedIdentity":
return &signedIdentity
default:
@@ -192,18 +279,30 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
if gotKeyPath {
opts = append(opts, PRSigstoreSignedWithKeyPath(tmp.KeyPath))
}
if gotKeyPaths {
opts = append(opts, PRSigstoreSignedWithKeyPaths(tmp.KeyPaths))
}
if gotKeyData {
opts = append(opts, PRSigstoreSignedWithKeyData(tmp.KeyData))
}
if gotKeyDatas {
opts = append(opts, PRSigstoreSignedWithKeyDatas(tmp.KeyDatas))
}
if gotFulcio {
opts = append(opts, PRSigstoreSignedWithFulcio(&fulcio))
}
if gotRekorPublicKeyPath {
opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPath(tmp.RekorPublicKeyPath))
}
if gotRekorPublicKeyPaths {
opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPaths(tmp.RekorPublicKeyPaths))
}
if gotRekorPublicKeyData {
opts = append(opts, PRSigstoreSignedWithRekorPublicKeyData(tmp.RekorPublicKeyData))
}
if gotRekorPublicKeyDatas {
opts = append(opts, PRSigstoreSignedWithRekorPublicKeyDatas(tmp.RekorPublicKeyDatas))
}
opts = append(opts, PRSigstoreSignedWithSignedIdentity(tmp.SignedIdentity))
res, err := newPRSigstoreSigned(opts...)
@@ -221,7 +320,7 @@ type PRSigstoreSignedFulcioOption func(*prSigstoreSignedFulcio) error
func PRSigstoreSignedFulcioWithCAPath(caPath string) PRSigstoreSignedFulcioOption {
return func(f *prSigstoreSignedFulcio) error {
if f.CAPath != "" {
return errors.New(`"caPath" already specified`)
return InvalidPolicyFormatError(`"caPath" already specified`)
}
f.CAPath = caPath
return nil
@@ -232,7 +331,7 @@ func PRSigstoreSignedFulcioWithCAPath(caPath string) PRSigstoreSignedFulcioOptio
func PRSigstoreSignedFulcioWithCAData(caData []byte) PRSigstoreSignedFulcioOption {
return func(f *prSigstoreSignedFulcio) error {
if f.CAData != nil {
return errors.New(`"caData" already specified`)
return InvalidPolicyFormatError(`"caData" already specified`)
}
f.CAData = caData
return nil
@@ -243,7 +342,7 @@ func PRSigstoreSignedFulcioWithCAData(caData []byte) PRSigstoreSignedFulcioOptio
func PRSigstoreSignedFulcioWithOIDCIssuer(oidcIssuer string) PRSigstoreSignedFulcioOption {
return func(f *prSigstoreSignedFulcio) error {
if f.OIDCIssuer != "" {
return errors.New(`"oidcIssuer" already specified`)
return InvalidPolicyFormatError(`"oidcIssuer" already specified`)
}
f.OIDCIssuer = oidcIssuer
return nil
@@ -254,7 +353,7 @@ func PRSigstoreSignedFulcioWithOIDCIssuer(oidcIssuer string) PRSigstoreSignedFul
func PRSigstoreSignedFulcioWithSubjectEmail(subjectEmail string) PRSigstoreSignedFulcioOption {
return func(f *prSigstoreSignedFulcio) error {
if f.SubjectEmail != "" {
return errors.New(`"subjectEmail" already specified`)
return InvalidPolicyFormatError(`"subjectEmail" already specified`)
}
f.SubjectEmail = subjectEmail
return nil
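Taken together, the new options let a single sigstoreSigned requirement trust several keys, which is what makes key rotation practical. A self-contained sketch of constructing one programmatically (the key paths are hypothetical; the option functions are the ones defined above):

package main

import (
	"fmt"

	"github.com/containers/image/v5/signature"
)

func main() {
	req, err := signature.NewPRSigstoreSigned(
		signature.PRSigstoreSignedWithKeyPaths([]string{
			"/etc/containers/keys/current.pub",  // hypothetical path
			"/etc/containers/keys/rotation.pub", // fallback accepted during key rotation
		}),
		signature.PRSigstoreSignedWithSignedIdentity(signature.NewPRMMatchRepoDigestOrExact()),
	)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("built policy requirement: %T\n", req)
}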

View File

@@ -6,7 +6,6 @@ import (
"context"
"errors"
"fmt"
"os"
"slices"
"github.com/containers/image/v5/internal/multierr"
@@ -27,33 +26,18 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva
}
// FIXME: move this to per-context initialization
var data [][]byte
keySources := 0
if pr.KeyPath != "" {
keySources++
d, err := os.ReadFile(pr.KeyPath)
if err != nil {
return sarRejected, nil, err
}
data = [][]byte{d}
const notOneSourceErrorText = `Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified`
data, err := loadBytesFromConfigSources(configBytesSources{
inconsistencyErrorMessage: notOneSourceErrorText,
path: pr.KeyPath,
paths: pr.KeyPaths,
data: pr.KeyData,
})
if err != nil {
return sarRejected, nil, err
}
if pr.KeyPaths != nil {
keySources++
data = [][]byte{}
for _, path := range pr.KeyPaths {
d, err := os.ReadFile(path)
if err != nil {
return sarRejected, nil, err
}
data = append(data, d)
}
}
if pr.KeyData != nil {
keySources++
data = [][]byte{pr.KeyData}
}
if keySources != 1 {
return sarRejected, nil, errors.New(`Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified`)
if data == nil {
return sarRejected, nil, errors.New(notOneSourceErrorText)
}
// FIXME: move this to per-context initialization

View File

@@ -10,6 +10,7 @@ import (
"errors"
"fmt"
"os"
"strings"
"github.com/containers/image/v5/internal/multierr"
"github.com/containers/image/v5/internal/private"
@@ -20,37 +21,69 @@ import (
"github.com/sigstore/sigstore/pkg/cryptoutils"
)
// loadBytesFromDataOrPath ensures there is at most one of ${prefix}Data and ${prefix}Path set,
// configBytesSources contains configuration fields which may result in one or more []byte values
type configBytesSources struct {
inconsistencyErrorMessage string // Error to return if more than one source is set
path string // …Path: a path to a file containing the data, or ""
paths []string // …Paths: paths to files containing the data, or nil
data []byte // …Data: a single instance of the raw data, or nil
datas [][]byte // …Datas: the raw data, or nil // codespell:ignore datas
}
// loadBytesFromConfigSources ensures at most one of the sources in src is set,
// and returns the referenced data, or nil if neither is set.
func loadBytesFromDataOrPath(prefix string, data []byte, path string) ([]byte, error) {
switch {
case data != nil && path != "":
return nil, fmt.Errorf(`Internal inconsistency: both "%sPath" and "%sData" specified`, prefix, prefix)
case path != "":
d, err := os.ReadFile(path)
func loadBytesFromConfigSources(src configBytesSources) ([][]byte, error) {
sources := 0
var data [][]byte // = nil
if src.path != "" {
sources++
d, err := os.ReadFile(src.path)
if err != nil {
return nil, err
}
return d, nil
case data != nil:
return data, nil
default: // Nothing
return nil, nil
data = [][]byte{d}
}
if src.paths != nil {
sources++
data = [][]byte{}
for _, path := range src.paths {
d, err := os.ReadFile(path)
if err != nil {
return nil, err
}
data = append(data, d)
}
}
if src.data != nil {
sources++
data = [][]byte{src.data}
}
if src.datas != nil { // codespell:ignore datas
sources++
data = src.datas // codespell:ignore datas
}
if sources > 1 {
return nil, errors.New(src.inconsistencyErrorMessage)
}
return data, nil
}
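A small usage fragment (hypothetical values): setting more than one source fails with the configured message, while a single source yields one []byte per configured file or data item:

pems, err := loadBytesFromConfigSources(configBytesSources{
	inconsistencyErrorMessage: `Internal inconsistency: more than one key source specified`, // hypothetical message
	paths:                     []string{"/etc/keys/a.pub", "/etc/keys/b.pub"},               // hypothetical paths
})
// On success, pems holds the contents of a.pub and b.pub, in order.
// Had both paths and data been set, err would carry inconsistencyErrorMessage.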
// prepareTrustRoot creates a fulcioTrustRoot from the input data.
// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.)
func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) {
caCertBytes, err := loadBytesFromDataOrPath("fulcioCA", f.CAData, f.CAPath)
caCertPEMs, err := loadBytesFromConfigSources(configBytesSources{
inconsistencyErrorMessage: `Internal inconsistency: both "caPath" and "caData" specified`,
path: f.CAPath,
data: f.CAData,
})
if err != nil {
return nil, err
}
if caCertBytes == nil {
return nil, errors.New(`Internal inconsistency: Fulcio specified with neither "caPath" nor "caData"`)
if len(caCertPEMs) != 1 {
return nil, errors.New(`Internal inconsistency: Fulcio specified with not exactly one of "caPath" nor "caData"`)
}
certs := x509.NewCertPool()
if ok := certs.AppendCertsFromPEM(caCertBytes); !ok {
if ok := certs.AppendCertsFromPEM(caCertPEMs[0]); !ok {
return nil, errors.New("error loading Fulcio CA certificates")
}
fulcio := fulcioTrustRoot{
@@ -66,24 +99,35 @@ func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) {
// sigstoreSignedTrustRoot contains an already parsed version of the prSigstoreSigned policy
type sigstoreSignedTrustRoot struct {
publicKey crypto.PublicKey
fulcio *fulcioTrustRoot
rekorPublicKey *ecdsa.PublicKey
publicKeys []crypto.PublicKey
fulcio *fulcioTrustRoot
rekorPublicKeys []*ecdsa.PublicKey
}
func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error) {
res := sigstoreSignedTrustRoot{}
publicKeyPEM, err := loadBytesFromDataOrPath("key", pr.KeyData, pr.KeyPath)
publicKeyPEMs, err := loadBytesFromConfigSources(configBytesSources{
inconsistencyErrorMessage: `Internal inconsistency: more than one of "keyPath", "keyPaths", "keyData", "keyDatas" specified`,
path: pr.KeyPath,
paths: pr.KeyPaths,
data: pr.KeyData,
datas: pr.KeyDatas, // codespell:ignore datas
})
if err != nil {
return nil, err
}
if publicKeyPEM != nil {
pk, err := cryptoutils.UnmarshalPEMToPublicKey(publicKeyPEM)
if err != nil {
return nil, fmt.Errorf("parsing public key: %w", err)
if publicKeyPEMs != nil {
for index, keyData := range publicKeyPEMs {
pk, err := cryptoutils.UnmarshalPEMToPublicKey(keyData)
if err != nil {
return nil, fmt.Errorf("parsing public key %d: %w", index+1, err)
}
res.publicKeys = append(res.publicKeys, pk)
}
if len(res.publicKeys) == 0 {
return nil, errors.New(`Internal inconsistency: "keyPath", "keyPaths", "keyData" and "keyDatas" produced no public keys`)
}
res.publicKey = pk
}
if pr.Fulcio != nil {
@@ -94,21 +138,32 @@ func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error)
res.fulcio = f
}
rekorPublicKeyPEM, err := loadBytesFromDataOrPath("rekorPublicKey", pr.RekorPublicKeyData, pr.RekorPublicKeyPath)
rekorPublicKeyPEMs, err := loadBytesFromConfigSources(configBytesSources{
inconsistencyErrorMessage: `Internal inconsistency: both "rekorPublicKeyPath" and "rekorPublicKeyData" specified`,
path: pr.RekorPublicKeyPath,
paths: pr.RekorPublicKeyPaths,
data: pr.RekorPublicKeyData,
datas: pr.RekorPublicKeyDatas, // codespell:ignore datas
})
if err != nil {
return nil, err
}
if rekorPublicKeyPEM != nil {
pk, err := cryptoutils.UnmarshalPEMToPublicKey(rekorPublicKeyPEM)
if err != nil {
return nil, fmt.Errorf("parsing Rekor public key: %w", err)
}
pkECDSA, ok := pk.(*ecdsa.PublicKey)
if !ok {
return nil, fmt.Errorf("Rekor public key is not using ECDSA")
if rekorPublicKeyPEMs != nil {
for index, pem := range rekorPublicKeyPEMs {
pk, err := cryptoutils.UnmarshalPEMToPublicKey(pem)
if err != nil {
return nil, fmt.Errorf("parsing Rekor public key %d: %w", index+1, err)
}
pkECDSA, ok := pk.(*ecdsa.PublicKey)
if !ok {
return nil, fmt.Errorf("Rekor public key %d is not using ECDSA", index+1)
}
res.rekorPublicKeys = append(res.rekorPublicKeys, pkECDSA)
}
if len(res.rekorPublicKeys) == 0 {
return nil, errors.New(`Internal inconsistency: "rekorPublicKeyPath", "rekorPublicKeyPaths", "rekorPublicKeyData" and "rekorPublicKeyDatas" produced no public keys`)
}
res.rekorPublicKey = pkECDSA
}
return &res, nil
@@ -134,37 +189,51 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva
}
untrustedPayload := sig.UntrustedPayload()
var publicKey crypto.PublicKey
var publicKeys []crypto.PublicKey
switch {
case trustRoot.publicKey != nil && trustRoot.fulcio != nil: // newPRSigstoreSigned rejects such combinations.
case trustRoot.publicKeys != nil && trustRoot.fulcio != nil: // newPRSigstoreSigned rejects such combinations.
return sarRejected, errors.New("Internal inconsistency: Both a public key and Fulcio CA specified")
case trustRoot.publicKey == nil && trustRoot.fulcio == nil: // newPRSigstoreSigned rejects such combinations.
case trustRoot.publicKeys == nil && trustRoot.fulcio == nil: // newPRSigstoreSigned rejects such combinations.
return sarRejected, errors.New("Internal inconsistency: Neither a public key nor a Fulcio CA specified")
case trustRoot.publicKey != nil:
if trustRoot.rekorPublicKey != nil {
case trustRoot.publicKeys != nil:
if trustRoot.rekorPublicKeys != nil {
untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey]
if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should work.
return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey)
}
// We could use publicKeyPEM directly, but let's re-marshal to avoid inconsistencies.
// FIXME: We could just generate DER instead of the full PEM text
recreatedPublicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(trustRoot.publicKey)
if err != nil {
// Coverage: The key was loaded from a PEM format, so it's unclear how this could fail.
// (PEM is not essential, MarshalPublicKeyToPEM can only fail if marshaling to ASN1.DER fails.)
return sarRejected, fmt.Errorf("re-marshaling public key to PEM: %w", err)
var rekorFailures []string
for _, candidatePublicKey := range trustRoot.publicKeys {
// We could use publicKeyPEM directly, but let's re-marshal to avoid inconsistencies.
// FIXME: We could just generate DER instead of the full PEM text
recreatedPublicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(candidatePublicKey)
if err != nil {
// Coverage: The key was loaded from a PEM format, so it's unclear how this could fail.
// (PEM is not essential, MarshalPublicKeyToPEM can only fail if marshaling to ASN1.DER fails.)
return sarRejected, fmt.Errorf("re-marshaling public key to PEM: %w", err)
}
// We don't care about the Rekor timestamp, just about log presence.
_, err = internal.VerifyRekorSET(trustRoot.rekorPublicKeys, []byte(untrustedSET), recreatedPublicKeyPEM, untrustedBase64Signature, untrustedPayload)
if err == nil {
publicKeys = append(publicKeys, candidatePublicKey)
break // The SET can only accept one public key entry, so if we found one, the rest either doesn't match or is a duplicate
}
rekorFailures = append(rekorFailures, err.Error())
}
// We don't care about the Rekor timestamp, just about log presence.
if _, err := internal.VerifyRekorSET(trustRoot.rekorPublicKey, []byte(untrustedSET), recreatedPublicKeyPEM, untrustedBase64Signature, untrustedPayload); err != nil {
return sarRejected, err
if len(publicKeys) == 0 {
if len(rekorFailures) == 0 {
// Coverage: We have ensured that len(trustRoot.publicKeys) != 0, when nothing succeeds, there must be at least one failure.
return sarRejected, errors.New(`Internal inconsistency: Rekor SET did not match any key but we have no failures.`)
}
return sarRejected, internal.NewInvalidSignatureError(fmt.Sprintf("No public key verified against the RekorSET: %s", strings.Join(rekorFailures, ", ")))
}
} else {
publicKeys = trustRoot.publicKeys
}
publicKey = trustRoot.publicKey
case trustRoot.fulcio != nil:
if trustRoot.rekorPublicKey == nil { // newPRSigstoreSigned rejects such combinations.
if trustRoot.rekorPublicKeys == nil { // newPRSigstoreSigned rejects such combinations.
return sarRejected, errors.New("Internal inconsistency: Fulcio CA specified without a Rekor public key")
}
untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey]
@@ -179,19 +248,20 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva
if untrustedIntermediateChain, ok := untrustedAnnotations[signature.SigstoreIntermediateCertificateChainAnnotationKey]; ok {
untrustedIntermediateChainBytes = []byte(untrustedIntermediateChain)
}
pk, err := verifyRekorFulcio(trustRoot.rekorPublicKey, trustRoot.fulcio,
pk, err := verifyRekorFulcio(trustRoot.rekorPublicKeys, trustRoot.fulcio,
[]byte(untrustedSET), []byte(untrustedCert), untrustedIntermediateChainBytes, untrustedBase64Signature, untrustedPayload)
if err != nil {
return sarRejected, err
}
publicKey = pk
publicKeys = []crypto.PublicKey{pk}
}
if publicKey == nil {
// Coverage: This should never happen, we have already excluded the possibility in the switch above.
if len(publicKeys) == 0 {
// Coverage: This should never happen, we ensured that trustRoot.publicKeys is non-empty if set,
// and we have already excluded the possibility in the switch above.
return sarRejected, fmt.Errorf("Internal inconsistency: publicKey not set before verifying sigstore payload")
}
signature, err := internal.VerifySigstorePayload(publicKey, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{
signature, err := internal.VerifySigstorePayload(publicKeys, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{
ValidateSignedDockerReference: func(ref string) error {
if !pr.SignedIdentity.matchesDockerReference(image, ref) {
return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref))

View File

@@ -74,7 +74,7 @@ type prSignedBy struct {
// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
KeyPath string `json:"keyPath,omitempty"`
// KeyPaths if a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
// KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
KeyPaths []string `json:"keyPaths,omitempty"`
// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath, KeyPaths and KeyData must be specified.
KeyData []byte `json:"keyData,omitempty"`
@@ -111,24 +111,35 @@ type prSignedBaseLayer struct {
type prSigstoreSigned struct {
prCommon
// KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyData, Fulcio must be specified.
// KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
KeyPath string `json:"keyPath,omitempty"`
// KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyData, Fulcio must be specified.
// KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
KeyPaths []string `json:"keyPaths,omitempty"`
// KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
KeyData []byte `json:"keyData,omitempty"`
// FIXME: Multiple public keys?
// KeyDatas is a set of trusted keys, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
KeyDatas [][]byte `json:"keyDatas,omitempty"`
// Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyData, Fulcio must be specified.
// Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
// If Fulcio is specified, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well.
Fulcio PRSigstoreSignedFulcio `json:"fulcio,omitempty"`
// RekorPublicKeyPath is a pathname to local file containing a public key of a Rekor server which must record acceptable signatures.
// If Fulcio is used, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well; otherwise it is optional
// (and Rekor inclusion is not required if a Rekor public key is not specified).
// If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
// otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
RekorPublicKeyPath string `json:"rekorPublicKeyPath,omitempty"`
// RekorPublicKeyPaths is a set of pathnames to local files, each containing a public key of a Rekor server. One of the keys must record acceptable signatures.
// If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
// otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
RekorPublicKeyPaths []string `json:"rekorPublicKeyPaths,omitempty"`
// RekorPublicKeyData contains a base64-encoded public key of a Rekor server which must record acceptable signatures.
// If Fulcio is used, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well; otherwise it is optional
// (and Rekor inclusion is not required if a Rekor public key is not specified).
// If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
// otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
RekorPublicKeyData []byte `json:"rekorPublicKeyData,omitempty"`
// RekorPublicKeyDatas each contain a base64-encoded public key of a Rekor server. One of the keys must record acceptable signatures.
// If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
// otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
RekorPublicKeyDatas [][]byte `json:"rekorPublicKeyDatas,omitempty"`
// SignedIdentity specifies what image identity the signature must be claiming about the image.
// Defaults to "matchRepoDigestOrExact" if not specified.

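In policy.json terms, the plural fields above let a sigstoreSigned requirement list several trusted keys, any one of which may validate a signature. A sketch of such a policy, parsed with the existing c/image entry point signature.NewPolicyFromBytes (illustrative only; the namespace and key paths are hypothetical):
package main

import (
	"fmt"

	"github.com/containers/image/v5/signature"
)

func main() {
	// Hypothetical policy: accept images from example.com/ns when signed by either key.
	policyJSON := []byte(`{
		"default": [{"type": "reject"}],
		"transports": {
			"docker": {
				"example.com/ns": [{
					"type": "sigstoreSigned",
					"keyPaths": ["/etc/pki/key1.pub", "/etc/pki/key2.pub"],
					"signedIdentity": {"type": "matchRepoDigestOrExact"}
				}]
			}
		}
	}`)
	policy, err := signature.NewPolicyFromBytes(policyJSON)
	fmt.Println(policy, err)
}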
View File

@@ -84,18 +84,36 @@ type storageImageDestinationLockProtected struct {
currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image
// In general, a layer is identified either by (compressed) digest, or by TOC digest.
// Externally, a layer is identified either by (compressed) digest, or by TOC digest
// (and we assume the TOC digest also uniquely identifies the contents, i.e. there aren't two
// different formats/ways to parse a single TOC); internally, we use uncompressed digest (“DiffID”) or a TOC digest.
// We may or may not know the relationships between these three values.
//
// When creating a layer, the c/storage layer metadata and image IDs must _only_ be based on trusted values
// we have computed ourselves. (Layer reuse can then look up against such trusted values, but it might not
// recompute those values for incomding layers — the point of the reuse is that we don't need to consume the incoming layer.)
// Layer identification: For a layer, at least one of indexToTOCDigest and blobDiffIDs must be available before commitLayer is called.
// The presence of an indexToTOCDigest is what decides how the layer is identified, i.e. which fields must be trusted.
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest, IFF the layer was created/found/reused by TOC digest
// recompute those values for incoming layers — the point of the reuse is that we don't need to consume the incoming layer.)
//
// Layer identification: For a layer, at least one of (indexToDiffID, indexToTOCDigest, blobDiffIDs) must be available
// before commitLayer is called.
// The layer is identified by the first of the three fields which exists, in that order (and the value must be trusted).
//
// WARNING: All values in indexToDiffID, indexToTOCDigest, and blobDiffIDs are _individually_ trusted, but blobDiffIDs is more subtle.
// The values in indexTo* are all consistent, because the code writing them processed them all at once, and consistently.
// But it is possible for a layer's indexToDiffID and indexToTOCDigest to be based on a TOC, without setting blobDiffIDs
// for the compressed digest of that index, and for blobDiffIDs[compressedDigest] to be set _separately_ while processing some
// other layer entry. In particular it is possible for indexToDiffID[index] and blobDiffIDs[compressedDigestAtIndex] to refer
// to mismatching contents.
// Users of these fields should use trustedLayerIdentityDataLocked, which centralizes the validity logic,
// instead of interpreting these fields, especially blobDiffIDs, directly.
//
// Ideally we wouldn't have blobDiffIDs, and we would just keep records by index, but the public API does not require the caller
// to provide layer indices; and configs don't have layer indices. blobDiffIDs needs to exist for those cases.
indexToDiffID map[int]digest.Digest // Mapping from layer index to DiffID
indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs. CAREFUL: See the WARNING above.
// Layer data: Before commitLayer is called, either at least one of (diffOutputs, indexToAdditionalLayer, filenames)
// should be available; or indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer.
// should be available; or indexToDiffID/indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer.
// They are looked up in the order they are mentioned above.
diffOutputs map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to a partially-pulled layer intermediate data
indexToAdditionalLayer map[int]storage.AdditionalLayer // Mapping from layer index to their corresponding additional layer
@@ -145,9 +163,12 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
},
indexToStorageID: make(map[int]string),
lockProtected: storageImageDestinationLockProtected{
indexToAddedLayerInfo: make(map[int]addedLayerInfo),
blobDiffIDs: make(map[digest.Digest]digest.Digest),
indexToTOCDigest: make(map[int]digest.Digest),
indexToAddedLayerInfo: make(map[int]addedLayerInfo),
indexToDiffID: make(map[int]digest.Digest),
indexToTOCDigest: make(map[int]digest.Digest),
blobDiffIDs: make(map[digest.Digest]digest.Digest),
diffOutputs: make(map[int]*graphdriver.DriverWithDifferOutput),
indexToAdditionalLayer: make(map[int]storage.AdditionalLayer),
filenames: make(map[digest.Digest]string),
@@ -323,20 +344,30 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
s.lock.Lock()
if out.UncompressedDigest != "" {
s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest
if out.TOCDigest != "" {
options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest)
}
// Don't set indexToTOCDigest on this path:
// - Using UncompressedDigest allows image reuse with non-partially-pulled layers, so we want to set indexToDiffID.
// - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch.
// That TOC is quite unlikely to match any other TOC value.
// The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is
// responsible for ensuring blobDigest has been validated.
if out.CompressedDigest != blobDigest {
return private.UploadedBlob{}, fmt.Errorf("internal error: ApplyDiffWithDiffer returned CompressedDigest %q not matching expected %q",
out.CompressedDigest, blobDigest)
}
s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
// So, record also information about blobDigest, that might benefit reuse.
// We trust ApplyDiffWithDiffer to validate or create both values correctly.
s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest)
} else {
// Don't identify layers by TOC if UncompressedDigest is available.
// - Using UncompressedDigest allows image reuse with non-partially-pulled layers
// - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch.
// That TOC is quite unlikely to match with any other TOC value.
// Use diffID for layer identity if it is known.
if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" {
s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest
}
s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
}
s.lockProtected.diffOutputs[options.LayerIndex] = out
@@ -465,49 +496,40 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
}
if len(layers) > 0 {
if size != -1 {
s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest
return true, private.ReusedBlob{
Digest: blobDigest,
Size: size,
}, nil
}
if !options.CanSubstitute {
return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", blobDigest)
}
s.lockProtected.blobDiffIDs[uncompressedDigest] = uncompressedDigest
return true, private.ReusedBlob{
Digest: uncompressedDigest,
Size: layers[0].UncompressedSize,
}, nil
if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest
return true, reused, nil
}
}
}
if options.TOCDigest != "" && options.LayerIndex != nil {
// Check if we know which UncompressedDigest the TOC digest resolves to, and we have a match for that.
// Prefer this over LayersByTOCDigest because we can identify the layer using UncompressedDigest, maximizing reuse.
uncompressedDigest := options.Cache.UncompressedDigestForTOC(options.TOCDigest)
if uncompressedDigest != "" {
layers, err = s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
}
if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest
reused.MatchedByTOCDigest = true
return true, reused, nil
}
}
// Check if we have a chunked layer in storage with the same TOC digest.
layers, err := s.imageRef.transport.store.LayersByTOCDigest(options.TOCDigest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, options.TOCDigest, err)
}
if len(layers) > 0 {
if size != -1 {
s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
return true, private.ReusedBlob{
Digest: blobDigest,
Size: size,
MatchedByTOCDigest: true,
}, nil
} else if options.CanSubstitute && layers[0].UncompressedDigest != "" {
s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
return true, private.ReusedBlob{
Digest: layers[0].UncompressedDigest,
Size: layers[0].UncompressedSize,
MatchedByTOCDigest: true,
}, nil
if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
if uncompressedDigest != "" {
s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest
}
s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
reused.MatchedByTOCDigest = true
return true, reused, nil
}
}
@@ -515,49 +537,137 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
return false, private.ReusedBlob{}, nil
}
// reusedBlobFromLayerLookup returns (true, ReusedBlob) if layers contain a usable match; or (false, ...) if not.
// The caller is still responsible for setting the layer identification fields, to allow the layer to be found again.
func reusedBlobFromLayerLookup(layers []storage.Layer, blobDigest digest.Digest, blobSize int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob) {
if len(layers) > 0 {
if blobSize != -1 {
return true, private.ReusedBlob{
Digest: blobDigest,
Size: blobSize,
}
} else if options.CanSubstitute && layers[0].UncompressedDigest != "" {
return true, private.ReusedBlob{
Digest: layers[0].UncompressedDigest,
Size: layers[0].UncompressedSize,
CompressionOperation: types.Decompress,
CompressionAlgorithm: nil,
}
}
}
return false, private.ReusedBlob{}
}
// trustedLayerIdentityData is a _consistent_ set of information known about a single layer.
type trustedLayerIdentityData struct {
layerIdentifiedByTOC bool // true if we decided the layer should be identified by tocDigest, false if by diffID
diffID digest.Digest // A digest of the uncompressed full contents of the layer, or "" if unknown; must be set if !layerIdentifiedByTOC
tocDigest digest.Digest // The TOC digest of the layer, or "" if unknown; must be set if layerIdentifiedByTOC
blobDigest digest.Digest // A digest of the (possibly-compressed) layer as presented, or "" if unknown/untrusted.
}
// trustedLayerIdentityDataLocked returns a _consistent_ set of information for a layer with (layerIndex, blobDigest).
// blobDigest is the (possibly-compressed) layer digest referenced in the manifest.
// It returns (trusted, true) if the layer was found, or (_, false) if insufficient data is available.
//
// The caller must hold s.lock.
func (s *storageImageDestination) trustedLayerIdentityDataLocked(layerIndex int, blobDigest digest.Digest) (trustedLayerIdentityData, bool) {
// The decision about layerIdentifiedByTOC must be _stable_ once the data for layerIndex is set,
// even if s.lockProtected.blobDiffIDs changes later and we can subsequently find an entry that wasn't originally available.
//
// If we previously didn't have a blobDigest match and decided to use the TOC, but _later_ we happen to find
// a blobDigest match, we might in principle want to reconsider, set layerIdentifiedByTOC to false, and use the file:
// but the layer in question, and possibly child layers, might already have been committed to storage.
// A late-arriving addition to s.lockProtected.blobDiffIDs would mean that we would want to set
// new layer IDs for potentially the whole parent chain = throw away the just-created layers and create them all again.
//
// Such a within-image layer reuse is expected to be pretty rare; instead, ignore the unexpected file match
// and proceed to the originally-planned TOC match.
res := trustedLayerIdentityData{}
diffID, layerIdentifiedByDiffID := s.lockProtected.indexToDiffID[layerIndex]
if layerIdentifiedByDiffID {
res.layerIdentifiedByTOC = false
res.diffID = diffID
}
if tocDigest, ok := s.lockProtected.indexToTOCDigest[layerIndex]; ok {
res.tocDigest = tocDigest
if !layerIdentifiedByDiffID {
res.layerIdentifiedByTOC = true
}
}
if otherDiffID, ok := s.lockProtected.blobDiffIDs[blobDigest]; ok {
if !layerIdentifiedByDiffID && !res.layerIdentifiedByTOC {
// This is the only data we have, so it is clearly self-consistent.
res.layerIdentifiedByTOC = false
res.diffID = otherDiffID
res.blobDigest = blobDigest
layerIdentifiedByDiffID = true
} else {
// We have set up the layer identity without referring to blobDigest:
// an attacker might have used a manifest with non-matching tocDigest and blobDigest.
// But, if we know a trusted diffID value from other sources, and it matches the one for blobDigest,
// we know blobDigest is fine as well.
if res.diffID != "" && otherDiffID == res.diffID {
res.blobDigest = blobDigest
}
}
}
if !layerIdentifiedByDiffID && !res.layerIdentifiedByTOC {
return trustedLayerIdentityData{}, false // We found nothing at all
}
return res, true
}
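A worked example of the precedence (digest values hypothetical): when several records exist for the same layer, indexToDiffID wins, and blobDigest is trusted only if its separately recorded DiffID agrees.
// Hypothetical inputs for one layer (index i, manifest blob digest "blob"):
//   indexToDiffID[i]    = sha256:dddd  (set)
//   indexToTOCDigest[i] = sha256:tttt  (set)
//   blobDiffIDs[blob]   = sha256:eeee  (set, disagrees with sha256:dddd)
// trustedLayerIdentityDataLocked(i, blob) then returns:
//   layerIdentifiedByTOC = false        // indexToDiffID takes precedence
//   diffID               = sha256:dddd
//   tocDigest            = sha256:tttt  // recorded, but not the identity
//   blobDigest           = ""           // sha256:eeee != sha256:dddd, so blob stays untrusted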
// computeID computes a recommended image ID based on information we have so far. If
// the manifest is not of a type that we recognize, we return an empty value, indicating
// that since we don't have a recommendation, a random ID should be used if one needs
// to be allocated.
func (s *storageImageDestination) computeID(m manifest.Manifest) string {
func (s *storageImageDestination) computeID(m manifest.Manifest) (string, error) {
// This is outside of the scope of HasThreadSafePutBlob, so we don't need to hold s.lock.
layerInfos := m.LayerInfos()
// Build the diffID list. We need the decompressed sums that we've been calculating to
// fill in the DiffIDs. It's expected (but not enforced by us) that the number of
// diffIDs corresponds to the number of non-EmptyLayer entries in the history.
var diffIDs []digest.Digest
switch m := m.(type) {
switch m.(type) {
case *manifest.Schema1:
// Build a list of the diffIDs we've generated for the non-throwaway FS layers,
// in reverse of the order in which they were originally listed.
for i, compat := range m.ExtractedV1Compatibility {
if compat.ThrowAway {
// Build a list of the diffIDs we've generated for the non-throwaway FS layers
for i, li := range layerInfos {
if li.EmptyLayer {
continue
}
blobSum := m.FSLayers[i].BlobSum
diffID, ok := s.lockProtected.blobDiffIDs[blobSum]
if !ok {
// this can, in principle, legitimately happen when a layer is reused by TOC.
logrus.Infof("error looking up diffID for layer %q", blobSum.String())
return ""
trusted, ok := s.trustedLayerIdentityDataLocked(i, li.Digest)
if !ok { // We have already committed all layers if we get to this point, so the data must have been available.
return "", fmt.Errorf("internal inconsistency: layer (%d, %q) not found", i, li.Digest)
}
diffIDs = append([]digest.Digest{diffID}, diffIDs...)
if trusted.diffID == "" {
if trusted.layerIdentifiedByTOC {
logrus.Infof("v2s1 image uses a layer identified by TOC with unknown diffID; choosing a random image ID")
return "", nil
}
return "", fmt.Errorf("internal inconsistency: layer (%d, %q) is not identified by TOC and has no diffID", i, li.Digest)
}
diffIDs = append(diffIDs, trusted.diffID)
}
case *manifest.Schema2, *manifest.OCI1:
// We know the ID calculation doesn't actually use the diffIDs, so we don't need to populate
// the diffID list.
default:
return ""
return "", nil
}
// We want to use the same ID for “the same” images, but without risking unwanted sharing / malicious image corruption.
//
// Traditionally that means the same ~config digest, as computed by m.ImageID;
// but if we pull a layer by TOC, we verify the layer against neither the (compressed) blob digest in the manifest,
// but if we identify a layer by TOC, we verify the layer against neither the (compressed) blob digest in the manifest,
// nor against the config's RootFS.DiffIDs. We don't really want to do either, to allow partial layer pulls where we never see
// most of the data.
//
// So, if a layer is pulled by TOC (and we do validate against the TOC), the fact that we used the TOC, and the value of the TOC,
// So, if a layer is identified by TOC (and we do validate against the TOC), the fact that we used the TOC, and the value of the TOC,
// must enter into the image ID computation.
// But for images where no TOC was used, continue to use IDs computed the traditional way, to maximize image reuse on upgrades,
// and to introduce the changed behavior only when partial pulls are used.
@@ -566,28 +676,31 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string {
// (skopeo copy --format v2s2 docker://…/zstd-chunked-image containers-storage:… ). So this is not happening only in the OCI case above.
ordinaryImageID, err := m.ImageID(diffIDs)
if err != nil {
return ""
return "", err
}
tocIDInput := ""
hasLayerPulledByTOC := false
for i := range m.LayerInfos() {
layerValue := "" // An empty string is not a valid digest, so this is unambiguous with the TOC case.
tocDigest, ok := s.lockProtected.indexToTOCDigest[i] // "" if not a TOC
if ok {
for i, li := range layerInfos {
trusted, ok := s.trustedLayerIdentityDataLocked(i, li.Digest)
if !ok { // We have already committed all layers if we get to this point, so the data must have been available.
return "", fmt.Errorf("internal inconsistency: layer (%d, %q) not found", i, li.Digest)
}
layerValue := "" // An empty string is not a valid digest, so this is unambiguous with the TOC case.
if trusted.layerIdentifiedByTOC {
hasLayerPulledByTOC = true
layerValue = tocDigest.String()
layerValue = trusted.tocDigest.String()
}
tocIDInput += layerValue + "|" // "|" can not be present in a TOC digest, so this is an unambiguous separator.
}
if !hasLayerPulledByTOC {
return ordinaryImageID
return ordinaryImageID, nil
}
// ordinaryImageID is a digest of a config, which is a JSON value.
// To avoid the risk of collisions, start the input with @ so that the input is not a valid JSON.
tocImageID := digest.FromString("@With TOC:" + tocIDInput).Encoded()
logrus.Debugf("Ordinary storage image ID %s; a layer was looked up by TOC, so using image ID %s", ordinaryImageID, tocImageID)
return tocImageID
return tocImageID, nil
}
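For concreteness, the ID input for a hypothetical two-layer image where only layer 1 was identified by TOC (digest value made up):
// layer 0 (not TOC-identified) contributes ""                        + "|"
// layer 1 contributes trusted.tocDigest.String(), e.g. "sha256:aaaa", + "|"
// so tocIDInput == "|sha256:aaaa|", and the image ID is
// digest.FromString("@With TOC:" + "|sha256:aaaa|").Encoded()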
// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
@@ -671,14 +784,14 @@ func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDig
s.lock.Lock()
defer s.lock.Unlock()
if d, found := s.lockProtected.indexToTOCDigest[layerIndex]; found {
return "@TOC=" + d.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
trusted, ok := s.trustedLayerIdentityDataLocked(layerIndex, blobDigest)
if !ok {
return "", false
}
if d, found := s.lockProtected.blobDiffIDs[blobDigest]; found {
return d.Encoded(), true // This looks like chain IDs, and it uses the traditional value.
if trusted.layerIdentifiedByTOC {
return "@TOC=" + trusted.tocDigest.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
}
return "", false
return trusted.diffID.Encoded(), true // This looks like chain IDs, and it uses the traditional value.
}
// commitLayer commits the specified layer with the given index to the storage.
@@ -778,6 +891,16 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
diffOutput, ok := s.lockProtected.diffOutputs[index]
s.lock.Unlock()
if ok {
// If we know a trusted DiffID value (e.g. from a BlobInfoCache), set it in diffOutput.
// That way it will be persisted in storage even if the cache is deleted; also
// we can use the value below to avoid the untrustedUncompressedDigest logic (and notably
// the costly commit delay until a manifest is available).
s.lock.Lock()
if d, ok := s.lockProtected.indexToDiffID[index]; ok {
diffOutput.UncompressedDigest = d
}
s.lock.Unlock()
var untrustedUncompressedDigest digest.Digest
if diffOutput.UncompressedDigest == "" {
d, err := s.untrustedLayerDiffID(index)
@@ -832,47 +955,43 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
// Check if we previously cached a file with that blob's contents. If we didn't,
// then we need to read the desired contents from a layer.
var trustedUncompressedDigest, trustedOriginalDigest digest.Digest // For storage.LayerOptions
var filename string
var gotFilename bool
s.lock.Lock()
tocDigest := s.lockProtected.indexToTOCDigest[index] // "" if not set
optionalDiffID := s.lockProtected.blobDiffIDs[layerDigest] // "" if not set
filename, gotFilename := s.lockProtected.filenames[layerDigest]
trusted, ok := s.trustedLayerIdentityDataLocked(index, layerDigest)
if ok && trusted.blobDigest != "" {
filename, gotFilename = s.lockProtected.filenames[trusted.blobDigest]
}
s.lock.Unlock()
if gotFilename && tocDigest == "" {
// If tocDigest != "", if we now happen to find a layerDigest match, the newLayerID has already been computed as TOC-based,
// and we don't know the relationship of the layerDigest and TOC digest.
// We could recompute newLayerID to be DiffID-based and use the file, but such a within-image layer
// reuse is expected to be pretty rare; instead, ignore the unexpected file match and proceed to the
// originally-planned TOC match.
// Because tocDigest == "", optionaldiffID must have been set; and even if it werent, PutLayer will recompute the digest from the stream.
trustedUncompressedDigest = optionalDiffID
trustedOriginalDigest = layerDigest // The code setting .filenames[layerDigest] is responsible for the contents matching.
if !ok { // We have already determined newLayerID, so the data must have been available.
return nil, fmt.Errorf("internal inconsistency: layer (%d, %q) not found", index, layerDigest)
}
var trustedOriginalDigest digest.Digest // For storage.LayerOptions
if gotFilename {
// The code setting .filenames[trusted.blobDigest] is responsible for ensuring that the file contents match trusted.blobDigest.
trustedOriginalDigest = trusted.blobDigest
} else {
// Try to find the layer with contents matching the data we use.
var layer *storage.Layer // = nil
if tocDigest != "" {
layers, err2 := s.imageRef.transport.store.LayersByTOCDigest(tocDigest)
if err2 == nil && len(layers) > 0 {
if trusted.diffID != "" {
if layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(trusted.diffID); err2 == nil && len(layers) > 0 {
layer = &layers[0]
} else {
return nil, fmt.Errorf("locating layer for TOC digest %q: %w", tocDigest, err2)
}
} else {
// Because tocDigest == "", optionaldiffID must have been set
layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(optionalDiffID)
if err2 == nil && len(layers) > 0 {
layer = &layers[0]
} else {
layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(layerDigest)
if err2 == nil && len(layers) > 0 {
layer = &layers[0]
}
}
if layer == nil {
return nil, fmt.Errorf("locating layer for blob %q: %w", layerDigest, err2)
}
}
if layer == nil && trusted.tocDigest != "" {
if layers, err2 := s.imageRef.transport.store.LayersByTOCDigest(trusted.tocDigest); err2 == nil && len(layers) > 0 {
layer = &layers[0]
}
}
if layer == nil && trusted.blobDigest != "" {
if layers, err2 := s.imageRef.transport.store.LayersByCompressedDigest(trusted.blobDigest); err2 == nil && len(layers) > 0 {
layer = &layers[0]
}
}
if layer == nil {
return nil, fmt.Errorf("layer for blob %q/%q/%q not found", trusted.blobDigest, trusted.tocDigest, trusted.diffID)
}
// Read the layer's contents.
noCompression := archive.Uncompressed
diffOptions := &storage.DiffOptions{
@@ -880,7 +999,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
}
diff, err2 := s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
if err2 != nil {
return nil, fmt.Errorf("reading layer %q for blob %q: %w", layer.ID, layerDigest, err2)
return nil, fmt.Errorf("reading layer %q for blob %q/%q/%q: %w", layer.ID, trusted.blobDigest, trusted.tocDigest, trusted.diffID, err2)
}
// Copy the layer diff to a file. Diff() takes a lock that it holds
// until the ReadCloser that it returns is closed, and PutLayer() wants
@@ -902,20 +1021,19 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
return nil, fmt.Errorf("storing blob to file %q: %w", filename, err)
}
if optionalDiffID == "" && layer.UncompressedDigest != "" {
optionalDiffID = layer.UncompressedDigest
if trusted.diffID == "" && layer.UncompressedDigest != "" {
trusted.diffID = layer.UncompressedDigest // This data might have been unavailable in tryReusingBlobAsPending, and is only known now.
}
// The stream we have is uncompressed, this matches contents of the stream.
// If tocDigest != "", trustedUncompressedDigest might still be ""; in that case PutLayer will compute the value from the stream.
trustedUncompressedDigest = optionalDiffID
// FIXME? trustedOriginalDigest could be set to layerDigest IF tocDigest == "" (otherwise layerDigest is untrusted).
// The stream we have is uncompressed, and it matches trusted.diffID (if known).
//
// FIXME? trustedOriginalDigest could be set to trusted.blobDigest if known, to allow more layer reuse.
// But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created
// layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream).
//
// We can legitimately set storage.LayerOptions.OriginalDigest to "",
// but that would just result in PutLayer computing the digest of the input stream == optionalDiffID.
// but that would just result in PutLayer computing the digest of the input stream == trusted.diffID.
// So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
trustedOriginalDigest = optionalDiffID
trustedOriginalDigest = trusted.diffID
// Allow using the already-collected layer contents without extracting the layer again.
//
@@ -923,11 +1041,11 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
// We don't have the original compressed data here to trivially set filenames[layerDigest].
// In particular we can't achieve the correct Layer.CompressedSize value with the current c/storage API.
// Within-image layer reuse is probably very rare, for now we prefer to avoid that complexity.
if trustedUncompressedDigest != "" {
if trusted.diffID != "" {
s.lock.Lock()
s.lockProtected.blobDiffIDs[trustedUncompressedDigest] = trustedUncompressedDigest
s.lockProtected.filenames[trustedUncompressedDigest] = filename
s.lockProtected.fileSizes[trustedUncompressedDigest] = fileSize
s.lockProtected.blobDiffIDs[trusted.diffID] = trusted.diffID
s.lockProtected.filenames[trusted.diffID] = filename
s.lockProtected.fileSizes[trusted.diffID] = fileSize
s.lock.Unlock()
}
}
@@ -940,11 +1058,12 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
// Build the new layer using the diff, regardless of where it came from.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{
OriginalDigest: trustedOriginalDigest,
UncompressedDigest: trustedUncompressedDigest,
OriginalDigest: trustedOriginalDigest,
// This might be "" if trusted.layerIdentifiedByTOC; in that case PutLayer will compute the value from the stream.
UncompressedDigest: trusted.diffID,
}, file)
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
return nil, fmt.Errorf("adding layer with blob %q: %w", layerDigest, err)
return nil, fmt.Errorf("adding layer with blob %q/%q/%q: %w", trusted.blobDigest, trusted.tocDigest, trusted.diffID, err)
}
return layer, nil
}
@@ -1155,7 +1274,10 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
// Create the image record, pointing to the most-recently added layer.
intendedID := s.imageRef.id
if intendedID == "" {
intendedID = s.computeID(man)
intendedID, err = s.computeID(man)
if err != nil {
return err
}
}
oldNames := []string{}
img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options)

View File

@@ -8,7 +8,7 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 32
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
VersionPatch = 2
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""

View File

@@ -202,10 +202,11 @@ func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) {
// parseEncryptedCompact parses a message in compact format.
func parseEncryptedCompact(input string) (*JSONWebEncryption, error) {
parts := strings.Split(input, ".")
if len(parts) != 5 {
// Five parts is four separators
if strings.Count(input, ".") != 4 {
return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts")
}
parts := strings.SplitN(input, ".", 5)
rawProtected, err := base64URLDecode(parts[0])
if err != nil {

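All four compact-parsing hunks in this update (JWE and JWS, in both the vendored v3 and v4 copies) apply the same pattern; this is the go-jose side of the CVE-2025-27144 fix. Counting separators up front means an input like strings.Repeat(".", 1<<20) is rejected before strings.Split can allocate a slice with a million elements. A condensed sketch of the three-part JWS case (the five-part JWE case is analogous; assumes the standard "errors" and "strings" imports):
func splitCompactJWS(input string) ([]string, error) {
	// Three parts is two separators; counting first bounds the work on malformed input.
	if strings.Count(input, ".") != 2 {
		return nil, errors.New("go-jose/go-jose: compact JWS format must have three parts")
	}
	// SplitN with a fixed limit allocates at most three substrings.
	return strings.SplitN(input, ".", 3), nil
}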
View File

@@ -275,10 +275,11 @@ func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) {
// parseSignedCompact parses a message in compact format.
func parseSignedCompact(input string, payload []byte) (*JSONWebSignature, error) {
parts := strings.Split(input, ".")
if len(parts) != 3 {
// Three parts is two separators
if strings.Count(input, ".") != 2 {
return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
}
parts := strings.SplitN(input, ".", 3)
if parts[1] != "" && payload != nil {
return nil, fmt.Errorf("go-jose/go-jose: payload is not detached")

View File

@@ -1,3 +1,27 @@
# v4.0.4
## Fixed
- Reverted "Allow unmarshalling JSONWebKeySets with unsupported key types" as a
breaking change. See #136 / #137.
# v4.0.3
## Changed
- Allow unmarshalling JSONWebKeySets with unsupported key types (#130)
- Document that OpaqueKeyEncrypter can't be implemented (for now) (#129)
- Dependency updates
# v4.0.2
## Changed
- Improved documentation of Verify() to note that JSONWebKeySet is a supported
argument type (#104)
- Defined exported error values for missing x5c header and unsupported elliptic
curves error cases (#117)
# v4.0.1
## Fixed

View File

@@ -7,9 +7,3 @@ When submitting code, please make every effort to follow existing conventions
and style in order to keep the code as readable as possible. Please also make
sure all tests pass by running `go test`, and format your code with `go fmt`.
We also recommend using `golint` and `errcheck`.
Before your code can be accepted into the project you must also sign the
Individual Contributor License Agreement. We use [cla-assistant.io][1] and you
will be prompted to sign once a pull request is opened.
[1]: https://cla-assistant.io/

View File

@@ -9,14 +9,6 @@ Package jose aims to provide an implementation of the Javascript Object Signing
and Encryption set of standards. This includes support for JSON Web Encryption,
JSON Web Signature, and JSON Web Token standards.
**Disclaimer**: This library contains encryption software that is subject to
the U.S. Export Administration Regulations. You may not export, re-export,
transfer or download this code or any part of it in violation of any United
States law, directive or regulation. In particular this software may not be
exported or re-exported in any form or on any media to Iran, North Sudan,
Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
US maintained blocked list.
## Overview
The implementation follows the
@@ -109,6 +101,6 @@ allows attaching a key id.
Examples can be found in the Godoc
reference for this package. The
[`jose-util`](https://github.com/go-jose/go-jose/tree/v4/jose-util)
[`jose-util`](https://github.com/go-jose/go-jose/tree/main/jose-util)
subdirectory also contains a small command-line utility which might be useful
as an example as well.

View File

@@ -459,7 +459,10 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error)
return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
}
key := tryJWKS(decryptionKey, obj.Header)
key, err := tryJWKS(decryptionKey, obj.Header)
if err != nil {
return nil, err
}
decrypter, err := newDecrypter(key)
if err != nil {
return nil, err
@@ -529,7 +532,10 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
}
key := tryJWKS(decryptionKey, obj.Header)
key, err := tryJWKS(decryptionKey, obj.Header)
if err != nil {
return -1, Header{}, nil, err
}
decrypter, err := newDecrypter(key)
if err != nil {
return -1, Header{}, nil, err

View File

@@ -288,10 +288,11 @@ func ParseEncryptedCompact(
keyAlgorithms []KeyAlgorithm,
contentEncryption []ContentEncryption,
) (*JSONWebEncryption, error) {
parts := strings.Split(input, ".")
if len(parts) != 5 {
// Five parts is four separators
if strings.Count(input, ".") != 4 {
return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts")
}
parts := strings.SplitN(input, ".", 5)
rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0])
if err != nil {

View File

@@ -239,10 +239,10 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
keyPub = key
}
} else {
err = fmt.Errorf("go-jose/go-jose: unknown curve %s'", raw.Crv)
return fmt.Errorf("go-jose/go-jose: unknown curve %s'", raw.Crv)
}
default:
err = fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty)
return fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty)
}
if err != nil {
@@ -779,7 +779,13 @@ func (key rawJSONWebKey) symmetricKey() ([]byte, error) {
return key.K.bytes(), nil
}
func tryJWKS(key interface{}, headers ...Header) interface{} {
var (
// ErrJWKSKidNotFound is returned when a JWKS does not contain a JWK with a
// key ID which matches one in the provided token's headers.
ErrJWKSKidNotFound = errors.New("go-jose/go-jose: JWK with matching kid not found in JWK Set")
)
func tryJWKS(key interface{}, headers ...Header) (interface{}, error) {
var jwks JSONWebKeySet
switch jwksType := key.(type) {
@@ -788,9 +794,11 @@ func tryJWKS(key interface{}, headers ...Header) interface{} {
case JSONWebKeySet:
jwks = jwksType
default:
return key
// If the specified key is not a JWKS, return as is.
return key, nil
}
// Determine the KID to search for from the headers.
var kid string
for _, header := range headers {
if header.KeyID != "" {
@@ -799,14 +807,17 @@ func tryJWKS(key interface{}, headers ...Header) interface{} {
}
}
// If no KID is specified in the headers, reject.
if kid == "" {
return key
return nil, ErrJWKSKidNotFound
}
// Find the JWK with the matching KID. If no JWK with the specified KID is
// found, reject.
keys := jwks.Key(kid)
if len(keys) == 0 {
return key
return nil, ErrJWKSKidNotFound
}
return keys[0].Key
return keys[0].Key, nil
}
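The behavioral change: a JSONWebKeySet containing no JWK that matches the token's kid used to be passed through unchanged, failing only later (and opaquely) in newVerifier or newDecrypter; it now fails fast with an exported sentinel. A hedged caller sketch against the v4 API, where token and jwks are placeholders:
sig, err := jose.ParseSigned(token, []jose.SignatureAlgorithm{jose.ES256})
if err != nil {
	// handle the parse error
}
payload, err := sig.Verify(jwks) // jwks is a jose.JSONWebKeySet
if errors.Is(err, jose.ErrJWKSKidNotFound) {
	// No key in the set matched the kid in the protected header.
}
_ = payload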

View File

@@ -327,10 +327,11 @@ func parseSignedCompact(
payload []byte,
signatureAlgorithms []SignatureAlgorithm,
) (*JSONWebSignature, error) {
parts := strings.Split(input, ".")
if len(parts) != 3 {
// Three parts is two separators
if strings.Count(input, ".") != 2 {
return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
}
parts := strings.SplitN(input, ".", 3)
if parts[1] != "" && payload != nil {
return nil, fmt.Errorf("go-jose/go-jose: payload is not detached")

View File

@@ -83,6 +83,9 @@ func (o *opaqueVerifier) verifyPayload(payload []byte, signature []byte, alg Sig
}
// OpaqueKeyEncrypter is an interface that supports encrypting keys with an opaque key.
//
// Note: this cannot currently be implemented outside this package because of its
// unexported method.
type OpaqueKeyEncrypter interface {
// KeyID returns the kid
KeyID() string

View File

@@ -390,7 +390,10 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte {
// The verificationKey argument must have one of the types allowed for the
// verificationKey argument of JSONWebSignature.Verify().
func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error {
key := tryJWKS(verificationKey, obj.headers()...)
key, err := tryJWKS(verificationKey, obj.headers()...)
if err != nil {
return err
}
verifier, err := newVerifier(key)
if err != nil {
return err
@@ -455,7 +458,10 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signa
// The verificationKey argument must have one of the types allowed for the
// verificationKey argument of JSONWebSignature.Verify().
func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) {
key := tryJWKS(verificationKey, obj.headers()...)
key, err := tryJWKS(verificationKey, obj.headers()...)
if err != nil {
return -1, Signature{}, err
}
verifier, err := newVerifier(key)
if err != nil {
return -1, Signature{}, err

View File

@@ -1,6 +1,7 @@
# A minimal logging API for Go
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr)
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
logr offers an(other) opinion on how Go programs and libraries can do logging

View File

@@ -236,15 +236,14 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
// implementation. It should be constructed with NewFormatter. Some of
// its methods directly implement logr.LogSink.
type Formatter struct {
outputFormat outputFormat
prefix string
values []any
valuesStr string
parentValuesStr string
depth int
opts *Options
group string // for slog groups
groupDepth int
outputFormat outputFormat
prefix string
values []any
valuesStr string
depth int
opts *Options
groupName string // for slog groups
groups []groupDef
}
// outputFormat indicates which outputFormat to use.
@@ -257,6 +256,13 @@ const (
outputJSON
)
// groupDef represents a saved group. The values may be empty, but we don't
// know if we need to render the group until the final record is rendered.
type groupDef struct {
name string
values string
}
// PseudoStruct is a list of key-value pairs that gets logged as a struct.
type PseudoStruct []any
@@ -264,76 +270,102 @@ type PseudoStruct []any
func (f Formatter) render(builtins, args []any) string {
// Empirically bytes.Buffer is faster than strings.Builder for this.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
if f.outputFormat == outputJSON {
buf.WriteByte('{') // for the whole line
buf.WriteByte('{') // for the whole record
}
// Render builtins
vals := builtins
if hook := f.opts.RenderBuiltinsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
f.flatten(buf, vals, false) // keys are ours, no need to escape
continuing := len(builtins) > 0
if f.parentValuesStr != "" {
// Turn the inner-most group into a string
argsStr := func() string {
buf := bytes.NewBuffer(make([]byte, 0, 1024))
vals = args
if hook := f.opts.RenderArgsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, true) // escape user-provided keys
return buf.String()
}()
// Render the stack of groups from the inside out.
bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr)
for i := len(f.groups) - 1; i >= 0; i-- {
grp := &f.groups[i]
if grp.values == "" && bodyStr == "" {
// no contents, so we must elide the whole group
continue
}
bodyStr = f.renderGroup(grp.name, grp.values, bodyStr)
}
if bodyStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.parentValuesStr)
continuing = true
}
groupDepth := f.groupDepth
if f.group != "" {
if f.valuesStr != "" || len(args) != 0 {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
buf.WriteByte(f.colon())
buf.WriteByte('{') // for the group
continuing = false
} else {
// The group was empty
groupDepth--
}
}
if f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.valuesStr)
continuing = true
}
vals = args
if hook := f.opts.RenderArgsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, continuing, true) // escape user-provided keys
for i := 0; i < groupDepth; i++ {
buf.WriteByte('}') // for the groups
buf.WriteString(bodyStr)
}
if f.outputFormat == outputJSON {
buf.WriteByte('}') // for the whole line
buf.WriteByte('}') // for the whole record
}
return buf.String()
}
// flatten renders a list of key-value pairs into a buffer. If continuing is
// true, it assumes that the buffer has previous values and will emit a
// separator (which depends on the output format) before the first pair it
// writes. If escapeKeys is true, the keys are assumed to have
// non-JSON-compatible characters in them and must be evaluated for escapes.
// renderGroup returns a string representation of the named group with rendered
// values and args. If the name is empty, this will return the values and args,
// joined. If the name is not empty, this will return a single key-value pair,
// where the value is a grouping of the values and args. If the values and
// args are both empty, this will return an empty string, even if the name was
// specified.
func (f Formatter) renderGroup(name string, values string, args string) string {
buf := bytes.NewBuffer(make([]byte, 0, 1024))
needClosingBrace := false
if name != "" && (values != "" || args != "") {
buf.WriteString(f.quoted(name, true)) // escape user-provided keys
buf.WriteByte(f.colon())
buf.WriteByte('{')
needClosingBrace = true
}
continuing := false
if values != "" {
buf.WriteString(values)
continuing = true
}
if args != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(args)
}
if needClosingBrace {
buf.WriteByte('}')
}
return buf.String()
}
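The practical effect of deferring group rendering: with slog, an empty group no longer leaves a dangling "name":{} in the record. A hedged sketch using existing logr APIs (funcr.NewJSON and logr.ToSlogHandler; assumes imports of "fmt", "log/slog", "github.com/go-logr/logr" and "github.com/go-logr/logr/funcr"; the JSON in the comments is approximate):
log := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})
slogger := slog.New(logr.ToSlogHandler(log))
slogger.WithGroup("req").Info("start")          // {"level":0,"msg":"start"}; empty "req" is elided
slogger.WithGroup("req").Info("start", "id", 7) // {"level":0,"msg":"start","req":{"id":7}}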
// flatten renders a list of key-value pairs into a buffer. If escapeKeys is
// true, the keys are assumed to have non-JSON-compatible characters in them
// and must be evaluated for escapes.
//
// This function returns a potentially modified version of kvList, which
// ensures that there is a value for every key (adding a value if needed) and
// that each key is a string (substituting a key if needed).
func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any {
func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any {
// This logic overlaps with sanitize() but saves one type-cast per key,
// which can be measurable.
if len(kvList)%2 != 0 {
@@ -354,7 +386,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
}
v := kvList[i+1]
if i > 0 || continuing {
if i > 0 {
if f.outputFormat == outputJSON {
buf.WriteByte(f.comma())
} else {
@@ -766,46 +798,17 @@ func (f Formatter) sanitize(kvList []any) []any {
// startGroup opens a new group scope (basically a sub-struct), which locks all
// the current saved values and starts them anew. This is needed to satisfy
// slog.
func (f *Formatter) startGroup(group string) {
func (f *Formatter) startGroup(name string) {
// Unnamed groups are just inlined.
if group == "" {
if name == "" {
return
}
// Any saved values can no longer be changed.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
continuing := false
if f.parentValuesStr != "" {
buf.WriteString(f.parentValuesStr)
continuing = true
}
if f.group != "" && f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
buf.WriteByte(f.colon())
buf.WriteByte('{') // for the group
continuing = false
}
if f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.valuesStr)
}
// NOTE: We don't close the scope here - that's done later, when a log line
// is actually rendered (because we have N scopes to close).
f.parentValuesStr = buf.String()
n := len(f.groups)
f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr})
// Start collecting new values.
f.group = group
f.groupDepth++
f.groupName = name
f.valuesStr = ""
f.values = nil
}
@@ -900,7 +903,7 @@ func (f *Formatter) AddValues(kvList []any) {
// Pre-render values, so we don't have to do it on each Info/Error call.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
f.flatten(buf, vals, false, true) // escape user-provided keys
f.flatten(buf, vals, true) // escape user-provided keys
f.valuesStr = buf.String()
}

View File

@@ -4,20 +4,21 @@ package runewidth
var combining = table{
{0x0300, 0x036F}, {0x0483, 0x0489}, {0x07EB, 0x07F3},
{0x0C00, 0x0C00}, {0x0C04, 0x0C04}, {0x0D00, 0x0D01},
{0x135D, 0x135F}, {0x1A7F, 0x1A7F}, {0x1AB0, 0x1AC0},
{0x1B6B, 0x1B73}, {0x1DC0, 0x1DF9}, {0x1DFB, 0x1DFF},
{0x0C00, 0x0C00}, {0x0C04, 0x0C04}, {0x0CF3, 0x0CF3},
{0x0D00, 0x0D01}, {0x135D, 0x135F}, {0x1A7F, 0x1A7F},
{0x1AB0, 0x1ACE}, {0x1B6B, 0x1B73}, {0x1DC0, 0x1DFF},
{0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2DE0, 0x2DFF},
{0x3099, 0x309A}, {0xA66F, 0xA672}, {0xA674, 0xA67D},
{0xA69E, 0xA69F}, {0xA6F0, 0xA6F1}, {0xA8E0, 0xA8F1},
{0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, {0x10376, 0x1037A},
{0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x11300, 0x11301},
{0x1133B, 0x1133C}, {0x11366, 0x1136C}, {0x11370, 0x11374},
{0x16AF0, 0x16AF4}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172},
{0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x10F82, 0x10F85},
{0x11300, 0x11301}, {0x1133B, 0x1133C}, {0x11366, 0x1136C},
{0x11370, 0x11374}, {0x16AF0, 0x16AF4}, {0x1CF00, 0x1CF2D},
{0x1CF30, 0x1CF46}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172},
{0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD},
{0x1D242, 0x1D244}, {0x1E000, 0x1E006}, {0x1E008, 0x1E018},
{0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, {0x1E026, 0x1E02A},
{0x1E8D0, 0x1E8D6},
{0x1E08F, 0x1E08F}, {0x1E8D0, 0x1E8D6},
}
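
These tables drive go-runewidth's per-rune classification. A small usage example against the library's public API (RuneWidth and StringWidth are existing functions in github.com/mattn/go-runewidth; the specific runes are just illustrations):

    package main

    import (
        "fmt"

        runewidth "github.com/mattn/go-runewidth"
    )

    func main() {
        fmt.Println(runewidth.RuneWidth('\u0301'))    // 0: combining acute accent, in the table above
        fmt.Println(runewidth.RuneWidth('界'))         // 2: CJK ideograph, in the doublewidth table
        fmt.Println(runewidth.StringWidth("e\u0301")) // 1: base letter plus zero-width combining mark
    }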
var doublewidth = table{
@@ -33,33 +34,34 @@ var doublewidth = table{
{0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797},
{0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C},
{0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99},
{0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB},
{0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF},
{0x3105, 0x312F}, {0x3131, 0x318E}, {0x3190, 0x31E3},
{0x31F0, 0x321E}, {0x3220, 0x3247}, {0x3250, 0x4DBF},
{0x4E00, 0xA48C}, {0xA490, 0xA4C6}, {0xA960, 0xA97C},
{0xAC00, 0xD7A3}, {0xF900, 0xFAFF}, {0xFE10, 0xFE19},
{0xFE30, 0xFE52}, {0xFE54, 0xFE66}, {0xFE68, 0xFE6B},
{0xFF01, 0xFF60}, {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE4},
{0x16FF0, 0x16FF1}, {0x17000, 0x187F7}, {0x18800, 0x18CD5},
{0x18D00, 0x18D08}, {0x1B000, 0x1B11E}, {0x1B150, 0x1B152},
{0x1B164, 0x1B167}, {0x1B170, 0x1B2FB}, {0x1F004, 0x1F004},
{0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A},
{0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248},
{0x1F250, 0x1F251}, {0x1F260, 0x1F265}, {0x1F300, 0x1F320},
{0x1F32D, 0x1F335}, {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393},
{0x1F3A0, 0x1F3CA}, {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0},
{0x1F3F4, 0x1F3F4}, {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440},
{0x1F442, 0x1F4FC}, {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E},
{0x1F550, 0x1F567}, {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596},
{0x1F5A4, 0x1F5A4}, {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5},
{0x1F6CC, 0x1F6CC}, {0x1F6D0, 0x1F6D2}, {0x1F6D5, 0x1F6D7},
{0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6FC}, {0x1F7E0, 0x1F7EB},
{0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1F978},
{0x1F97A, 0x1F9CB}, {0x1F9CD, 0x1F9FF}, {0x1FA70, 0x1FA74},
{0x1FA78, 0x1FA7A}, {0x1FA80, 0x1FA86}, {0x1FA90, 0x1FAA8},
{0x1FAB0, 0x1FAB6}, {0x1FAC0, 0x1FAC2}, {0x1FAD0, 0x1FAD6},
{0x20000, 0x2FFFD}, {0x30000, 0x3FFFD},
{0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x303E},
{0x3041, 0x3096}, {0x3099, 0x30FF}, {0x3105, 0x312F},
{0x3131, 0x318E}, {0x3190, 0x31E3}, {0x31EF, 0x321E},
{0x3220, 0x3247}, {0x3250, 0x4DBF}, {0x4E00, 0xA48C},
{0xA490, 0xA4C6}, {0xA960, 0xA97C}, {0xAC00, 0xD7A3},
{0xF900, 0xFAFF}, {0xFE10, 0xFE19}, {0xFE30, 0xFE52},
{0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, {0xFF01, 0xFF60},
{0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE4}, {0x16FF0, 0x16FF1},
{0x17000, 0x187F7}, {0x18800, 0x18CD5}, {0x18D00, 0x18D08},
{0x1AFF0, 0x1AFF3}, {0x1AFF5, 0x1AFFB}, {0x1AFFD, 0x1AFFE},
{0x1B000, 0x1B122}, {0x1B132, 0x1B132}, {0x1B150, 0x1B152},
{0x1B155, 0x1B155}, {0x1B164, 0x1B167}, {0x1B170, 0x1B2FB},
{0x1F004, 0x1F004}, {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E},
{0x1F191, 0x1F19A}, {0x1F200, 0x1F202}, {0x1F210, 0x1F23B},
{0x1F240, 0x1F248}, {0x1F250, 0x1F251}, {0x1F260, 0x1F265},
{0x1F300, 0x1F320}, {0x1F32D, 0x1F335}, {0x1F337, 0x1F37C},
{0x1F37E, 0x1F393}, {0x1F3A0, 0x1F3CA}, {0x1F3CF, 0x1F3D3},
{0x1F3E0, 0x1F3F0}, {0x1F3F4, 0x1F3F4}, {0x1F3F8, 0x1F43E},
{0x1F440, 0x1F440}, {0x1F442, 0x1F4FC}, {0x1F4FF, 0x1F53D},
{0x1F54B, 0x1F54E}, {0x1F550, 0x1F567}, {0x1F57A, 0x1F57A},
{0x1F595, 0x1F596}, {0x1F5A4, 0x1F5A4}, {0x1F5FB, 0x1F64F},
{0x1F680, 0x1F6C5}, {0x1F6CC, 0x1F6CC}, {0x1F6D0, 0x1F6D2},
{0x1F6D5, 0x1F6D7}, {0x1F6DC, 0x1F6DF}, {0x1F6EB, 0x1F6EC},
{0x1F6F4, 0x1F6FC}, {0x1F7E0, 0x1F7EB}, {0x1F7F0, 0x1F7F0},
{0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1F9FF},
{0x1FA70, 0x1FA7C}, {0x1FA80, 0x1FA88}, {0x1FA90, 0x1FABD},
{0x1FABF, 0x1FAC5}, {0x1FACE, 0x1FADB}, {0x1FAE0, 0x1FAE8},
{0x1FAF0, 0x1FAF8}, {0x20000, 0x2FFFD}, {0x30000, 0x3FFFD},
}
var ambiguous = table{
@@ -154,43 +156,43 @@ var neutral = table{
{0x0402, 0x040F}, {0x0450, 0x0450}, {0x0452, 0x052F},
{0x0531, 0x0556}, {0x0559, 0x058A}, {0x058D, 0x058F},
{0x0591, 0x05C7}, {0x05D0, 0x05EA}, {0x05EF, 0x05F4},
{0x0600, 0x061C}, {0x061E, 0x070D}, {0x070F, 0x074A},
{0x074D, 0x07B1}, {0x07C0, 0x07FA}, {0x07FD, 0x082D},
{0x0830, 0x083E}, {0x0840, 0x085B}, {0x085E, 0x085E},
{0x0860, 0x086A}, {0x08A0, 0x08B4}, {0x08B6, 0x08C7},
{0x08D3, 0x0983}, {0x0985, 0x098C}, {0x098F, 0x0990},
{0x0993, 0x09A8}, {0x09AA, 0x09B0}, {0x09B2, 0x09B2},
{0x09B6, 0x09B9}, {0x09BC, 0x09C4}, {0x09C7, 0x09C8},
{0x09CB, 0x09CE}, {0x09D7, 0x09D7}, {0x09DC, 0x09DD},
{0x09DF, 0x09E3}, {0x09E6, 0x09FE}, {0x0A01, 0x0A03},
{0x0A05, 0x0A0A}, {0x0A0F, 0x0A10}, {0x0A13, 0x0A28},
{0x0A2A, 0x0A30}, {0x0A32, 0x0A33}, {0x0A35, 0x0A36},
{0x0A38, 0x0A39}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42},
{0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51},
{0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E}, {0x0A66, 0x0A76},
{0x0A81, 0x0A83}, {0x0A85, 0x0A8D}, {0x0A8F, 0x0A91},
{0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, {0x0AB2, 0x0AB3},
{0x0AB5, 0x0AB9}, {0x0ABC, 0x0AC5}, {0x0AC7, 0x0AC9},
{0x0ACB, 0x0ACD}, {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE3},
{0x0AE6, 0x0AF1}, {0x0AF9, 0x0AFF}, {0x0B01, 0x0B03},
{0x0B05, 0x0B0C}, {0x0B0F, 0x0B10}, {0x0B13, 0x0B28},
{0x0B2A, 0x0B30}, {0x0B32, 0x0B33}, {0x0B35, 0x0B39},
{0x0B3C, 0x0B44}, {0x0B47, 0x0B48}, {0x0B4B, 0x0B4D},
{0x0B55, 0x0B57}, {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B63},
{0x0B66, 0x0B77}, {0x0B82, 0x0B83}, {0x0B85, 0x0B8A},
{0x0B8E, 0x0B90}, {0x0B92, 0x0B95}, {0x0B99, 0x0B9A},
{0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F}, {0x0BA3, 0x0BA4},
{0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9}, {0x0BBE, 0x0BC2},
{0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCD}, {0x0BD0, 0x0BD0},
{0x0BD7, 0x0BD7}, {0x0BE6, 0x0BFA}, {0x0C00, 0x0C0C},
{0x0C0E, 0x0C10}, {0x0C12, 0x0C28}, {0x0C2A, 0x0C39},
{0x0C3D, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D},
{0x0C55, 0x0C56}, {0x0C58, 0x0C5A}, {0x0C60, 0x0C63},
{0x0600, 0x070D}, {0x070F, 0x074A}, {0x074D, 0x07B1},
{0x07C0, 0x07FA}, {0x07FD, 0x082D}, {0x0830, 0x083E},
{0x0840, 0x085B}, {0x085E, 0x085E}, {0x0860, 0x086A},
{0x0870, 0x088E}, {0x0890, 0x0891}, {0x0898, 0x0983},
{0x0985, 0x098C}, {0x098F, 0x0990}, {0x0993, 0x09A8},
{0x09AA, 0x09B0}, {0x09B2, 0x09B2}, {0x09B6, 0x09B9},
{0x09BC, 0x09C4}, {0x09C7, 0x09C8}, {0x09CB, 0x09CE},
{0x09D7, 0x09D7}, {0x09DC, 0x09DD}, {0x09DF, 0x09E3},
{0x09E6, 0x09FE}, {0x0A01, 0x0A03}, {0x0A05, 0x0A0A},
{0x0A0F, 0x0A10}, {0x0A13, 0x0A28}, {0x0A2A, 0x0A30},
{0x0A32, 0x0A33}, {0x0A35, 0x0A36}, {0x0A38, 0x0A39},
{0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, {0x0A47, 0x0A48},
{0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, {0x0A59, 0x0A5C},
{0x0A5E, 0x0A5E}, {0x0A66, 0x0A76}, {0x0A81, 0x0A83},
{0x0A85, 0x0A8D}, {0x0A8F, 0x0A91}, {0x0A93, 0x0AA8},
{0x0AAA, 0x0AB0}, {0x0AB2, 0x0AB3}, {0x0AB5, 0x0AB9},
{0x0ABC, 0x0AC5}, {0x0AC7, 0x0AC9}, {0x0ACB, 0x0ACD},
{0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE3}, {0x0AE6, 0x0AF1},
{0x0AF9, 0x0AFF}, {0x0B01, 0x0B03}, {0x0B05, 0x0B0C},
{0x0B0F, 0x0B10}, {0x0B13, 0x0B28}, {0x0B2A, 0x0B30},
{0x0B32, 0x0B33}, {0x0B35, 0x0B39}, {0x0B3C, 0x0B44},
{0x0B47, 0x0B48}, {0x0B4B, 0x0B4D}, {0x0B55, 0x0B57},
{0x0B5C, 0x0B5D}, {0x0B5F, 0x0B63}, {0x0B66, 0x0B77},
{0x0B82, 0x0B83}, {0x0B85, 0x0B8A}, {0x0B8E, 0x0B90},
{0x0B92, 0x0B95}, {0x0B99, 0x0B9A}, {0x0B9C, 0x0B9C},
{0x0B9E, 0x0B9F}, {0x0BA3, 0x0BA4}, {0x0BA8, 0x0BAA},
{0x0BAE, 0x0BB9}, {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8},
{0x0BCA, 0x0BCD}, {0x0BD0, 0x0BD0}, {0x0BD7, 0x0BD7},
{0x0BE6, 0x0BFA}, {0x0C00, 0x0C0C}, {0x0C0E, 0x0C10},
{0x0C12, 0x0C28}, {0x0C2A, 0x0C39}, {0x0C3C, 0x0C44},
{0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, {0x0C55, 0x0C56},
{0x0C58, 0x0C5A}, {0x0C5D, 0x0C5D}, {0x0C60, 0x0C63},
{0x0C66, 0x0C6F}, {0x0C77, 0x0C8C}, {0x0C8E, 0x0C90},
{0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9},
{0x0CBC, 0x0CC4}, {0x0CC6, 0x0CC8}, {0x0CCA, 0x0CCD},
{0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE3},
{0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF2}, {0x0D00, 0x0D0C},
{0x0CD5, 0x0CD6}, {0x0CDD, 0x0CDE}, {0x0CE0, 0x0CE3},
{0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF3}, {0x0D00, 0x0D0C},
{0x0D0E, 0x0D10}, {0x0D12, 0x0D44}, {0x0D46, 0x0D48},
{0x0D4A, 0x0D4F}, {0x0D54, 0x0D63}, {0x0D66, 0x0D7F},
{0x0D81, 0x0D83}, {0x0D85, 0x0D96}, {0x0D9A, 0x0DB1},
@@ -200,7 +202,7 @@ var neutral = table{
{0x0E01, 0x0E3A}, {0x0E3F, 0x0E5B}, {0x0E81, 0x0E82},
{0x0E84, 0x0E84}, {0x0E86, 0x0E8A}, {0x0E8C, 0x0EA3},
{0x0EA5, 0x0EA5}, {0x0EA7, 0x0EBD}, {0x0EC0, 0x0EC4},
{0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9},
{0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECE}, {0x0ED0, 0x0ED9},
{0x0EDC, 0x0EDF}, {0x0F00, 0x0F47}, {0x0F49, 0x0F6C},
{0x0F71, 0x0F97}, {0x0F99, 0x0FBC}, {0x0FBE, 0x0FCC},
{0x0FCE, 0x0FDA}, {0x1000, 0x10C5}, {0x10C7, 0x10C7},
@@ -212,20 +214,19 @@ var neutral = table{
{0x12D8, 0x1310}, {0x1312, 0x1315}, {0x1318, 0x135A},
{0x135D, 0x137C}, {0x1380, 0x1399}, {0x13A0, 0x13F5},
{0x13F8, 0x13FD}, {0x1400, 0x169C}, {0x16A0, 0x16F8},
{0x1700, 0x170C}, {0x170E, 0x1714}, {0x1720, 0x1736},
{0x1740, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770},
{0x1772, 0x1773}, {0x1780, 0x17DD}, {0x17E0, 0x17E9},
{0x17F0, 0x17F9}, {0x1800, 0x180E}, {0x1810, 0x1819},
{0x1820, 0x1878}, {0x1880, 0x18AA}, {0x18B0, 0x18F5},
{0x1900, 0x191E}, {0x1920, 0x192B}, {0x1930, 0x193B},
{0x1940, 0x1940}, {0x1944, 0x196D}, {0x1970, 0x1974},
{0x1980, 0x19AB}, {0x19B0, 0x19C9}, {0x19D0, 0x19DA},
{0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E}, {0x1A60, 0x1A7C},
{0x1A7F, 0x1A89}, {0x1A90, 0x1A99}, {0x1AA0, 0x1AAD},
{0x1AB0, 0x1AC0}, {0x1B00, 0x1B4B}, {0x1B50, 0x1B7C},
{0x1B80, 0x1BF3}, {0x1BFC, 0x1C37}, {0x1C3B, 0x1C49},
{0x1C4D, 0x1C88}, {0x1C90, 0x1CBA}, {0x1CBD, 0x1CC7},
{0x1CD0, 0x1CFA}, {0x1D00, 0x1DF9}, {0x1DFB, 0x1F15},
{0x1700, 0x1715}, {0x171F, 0x1736}, {0x1740, 0x1753},
{0x1760, 0x176C}, {0x176E, 0x1770}, {0x1772, 0x1773},
{0x1780, 0x17DD}, {0x17E0, 0x17E9}, {0x17F0, 0x17F9},
{0x1800, 0x1819}, {0x1820, 0x1878}, {0x1880, 0x18AA},
{0x18B0, 0x18F5}, {0x1900, 0x191E}, {0x1920, 0x192B},
{0x1930, 0x193B}, {0x1940, 0x1940}, {0x1944, 0x196D},
{0x1970, 0x1974}, {0x1980, 0x19AB}, {0x19B0, 0x19C9},
{0x19D0, 0x19DA}, {0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E},
{0x1A60, 0x1A7C}, {0x1A7F, 0x1A89}, {0x1A90, 0x1A99},
{0x1AA0, 0x1AAD}, {0x1AB0, 0x1ACE}, {0x1B00, 0x1B4C},
{0x1B50, 0x1B7E}, {0x1B80, 0x1BF3}, {0x1BFC, 0x1C37},
{0x1C3B, 0x1C49}, {0x1C4D, 0x1C88}, {0x1C90, 0x1CBA},
{0x1CBD, 0x1CC7}, {0x1CD0, 0x1CFA}, {0x1D00, 0x1F15},
{0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D},
{0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B},
{0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4},
@@ -237,7 +238,7 @@ var neutral = table{
{0x2036, 0x203A}, {0x203C, 0x203D}, {0x203F, 0x2064},
{0x2066, 0x2071}, {0x2075, 0x207E}, {0x2080, 0x2080},
{0x2085, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8},
{0x20AA, 0x20AB}, {0x20AD, 0x20BF}, {0x20D0, 0x20F0},
{0x20AA, 0x20AB}, {0x20AD, 0x20C0}, {0x20D0, 0x20F0},
{0x2100, 0x2102}, {0x2104, 0x2104}, {0x2106, 0x2108},
{0x210A, 0x2112}, {0x2114, 0x2115}, {0x2117, 0x2120},
{0x2123, 0x2125}, {0x2127, 0x212A}, {0x212C, 0x2152},
@@ -275,15 +276,15 @@ var neutral = table{
{0x2780, 0x2794}, {0x2798, 0x27AF}, {0x27B1, 0x27BE},
{0x27C0, 0x27E5}, {0x27EE, 0x2984}, {0x2987, 0x2B1A},
{0x2B1D, 0x2B4F}, {0x2B51, 0x2B54}, {0x2B5A, 0x2B73},
{0x2B76, 0x2B95}, {0x2B97, 0x2C2E}, {0x2C30, 0x2C5E},
{0x2C60, 0x2CF3}, {0x2CF9, 0x2D25}, {0x2D27, 0x2D27},
{0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D70},
{0x2D7F, 0x2D96}, {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE},
{0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6},
{0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE},
{0x2DE0, 0x2E52}, {0x303F, 0x303F}, {0x4DC0, 0x4DFF},
{0xA4D0, 0xA62B}, {0xA640, 0xA6F7}, {0xA700, 0xA7BF},
{0xA7C2, 0xA7CA}, {0xA7F5, 0xA82C}, {0xA830, 0xA839},
{0x2B76, 0x2B95}, {0x2B97, 0x2CF3}, {0x2CF9, 0x2D25},
{0x2D27, 0x2D27}, {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67},
{0x2D6F, 0x2D70}, {0x2D7F, 0x2D96}, {0x2DA0, 0x2DA6},
{0x2DA8, 0x2DAE}, {0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE},
{0x2DC0, 0x2DC6}, {0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6},
{0x2DD8, 0x2DDE}, {0x2DE0, 0x2E5D}, {0x303F, 0x303F},
{0x4DC0, 0x4DFF}, {0xA4D0, 0xA62B}, {0xA640, 0xA6F7},
{0xA700, 0xA7CA}, {0xA7D0, 0xA7D1}, {0xA7D3, 0xA7D3},
{0xA7D5, 0xA7D9}, {0xA7F2, 0xA82C}, {0xA830, 0xA839},
{0xA840, 0xA877}, {0xA880, 0xA8C5}, {0xA8CE, 0xA8D9},
{0xA8E0, 0xA953}, {0xA95F, 0xA95F}, {0xA980, 0xA9CD},
{0xA9CF, 0xA9D9}, {0xA9DE, 0xA9FE}, {0xAA00, 0xAA36},
@@ -294,8 +295,8 @@ var neutral = table{
{0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDFFF},
{0xFB00, 0xFB06}, {0xFB13, 0xFB17}, {0xFB1D, 0xFB36},
{0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41},
{0xFB43, 0xFB44}, {0xFB46, 0xFBC1}, {0xFBD3, 0xFD3F},
{0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, {0xFDF0, 0xFDFD},
{0xFB43, 0xFB44}, {0xFB46, 0xFBC2}, {0xFBD3, 0xFD8F},
{0xFD92, 0xFDC7}, {0xFDCF, 0xFDCF}, {0xFDF0, 0xFDFF},
{0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC},
{0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFC}, {0x10000, 0x1000B},
{0x1000D, 0x10026}, {0x10028, 0x1003A}, {0x1003C, 0x1003D},
@@ -307,44 +308,48 @@ var neutral = table{
{0x10380, 0x1039D}, {0x1039F, 0x103C3}, {0x103C8, 0x103D5},
{0x10400, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3},
{0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563},
{0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755},
{0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808},
{0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C},
{0x1083F, 0x10855}, {0x10857, 0x1089E}, {0x108A7, 0x108AF},
{0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x1091B},
{0x1091F, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x109B7},
{0x109BC, 0x109CF}, {0x109D2, 0x10A03}, {0x10A05, 0x10A06},
{0x10A0C, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A35},
{0x10A38, 0x10A3A}, {0x10A3F, 0x10A48}, {0x10A50, 0x10A58},
{0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6}, {0x10AEB, 0x10AF6},
{0x10B00, 0x10B35}, {0x10B39, 0x10B55}, {0x10B58, 0x10B72},
{0x10B78, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF},
{0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2},
{0x10CFA, 0x10D27}, {0x10D30, 0x10D39}, {0x10E60, 0x10E7E},
{0x10E80, 0x10EA9}, {0x10EAB, 0x10EAD}, {0x10EB0, 0x10EB1},
{0x10F00, 0x10F27}, {0x10F30, 0x10F59}, {0x10FB0, 0x10FCB},
{0x10FE0, 0x10FF6}, {0x11000, 0x1104D}, {0x11052, 0x1106F},
{0x1107F, 0x110C1}, {0x110CD, 0x110CD}, {0x110D0, 0x110E8},
{0x110F0, 0x110F9}, {0x11100, 0x11134}, {0x11136, 0x11147},
{0x11150, 0x11176}, {0x11180, 0x111DF}, {0x111E1, 0x111F4},
{0x11200, 0x11211}, {0x11213, 0x1123E}, {0x11280, 0x11286},
{0x11288, 0x11288}, {0x1128A, 0x1128D}, {0x1128F, 0x1129D},
{0x1129F, 0x112A9}, {0x112B0, 0x112EA}, {0x112F0, 0x112F9},
{0x11300, 0x11303}, {0x11305, 0x1130C}, {0x1130F, 0x11310},
{0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333},
{0x11335, 0x11339}, {0x1133B, 0x11344}, {0x11347, 0x11348},
{0x1134B, 0x1134D}, {0x11350, 0x11350}, {0x11357, 0x11357},
{0x1135D, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374},
{0x11400, 0x1145B}, {0x1145D, 0x11461}, {0x11480, 0x114C7},
{0x114D0, 0x114D9}, {0x11580, 0x115B5}, {0x115B8, 0x115DD},
{0x11600, 0x11644}, {0x11650, 0x11659}, {0x11660, 0x1166C},
{0x11680, 0x116B8}, {0x116C0, 0x116C9}, {0x11700, 0x1171A},
{0x1171D, 0x1172B}, {0x11730, 0x1173F}, {0x11800, 0x1183B},
{0x118A0, 0x118F2}, {0x118FF, 0x11906}, {0x11909, 0x11909},
{0x1190C, 0x11913}, {0x11915, 0x11916}, {0x11918, 0x11935},
{0x11937, 0x11938}, {0x1193B, 0x11946}, {0x11950, 0x11959},
{0x119A0, 0x119A7}, {0x119AA, 0x119D7}, {0x119DA, 0x119E4},
{0x11A00, 0x11A47}, {0x11A50, 0x11AA2}, {0x11AC0, 0x11AF8},
{0x1056F, 0x1057A}, {0x1057C, 0x1058A}, {0x1058C, 0x10592},
{0x10594, 0x10595}, {0x10597, 0x105A1}, {0x105A3, 0x105B1},
{0x105B3, 0x105B9}, {0x105BB, 0x105BC}, {0x10600, 0x10736},
{0x10740, 0x10755}, {0x10760, 0x10767}, {0x10780, 0x10785},
{0x10787, 0x107B0}, {0x107B2, 0x107BA}, {0x10800, 0x10805},
{0x10808, 0x10808}, {0x1080A, 0x10835}, {0x10837, 0x10838},
{0x1083C, 0x1083C}, {0x1083F, 0x10855}, {0x10857, 0x1089E},
{0x108A7, 0x108AF}, {0x108E0, 0x108F2}, {0x108F4, 0x108F5},
{0x108FB, 0x1091B}, {0x1091F, 0x10939}, {0x1093F, 0x1093F},
{0x10980, 0x109B7}, {0x109BC, 0x109CF}, {0x109D2, 0x10A03},
{0x10A05, 0x10A06}, {0x10A0C, 0x10A13}, {0x10A15, 0x10A17},
{0x10A19, 0x10A35}, {0x10A38, 0x10A3A}, {0x10A3F, 0x10A48},
{0x10A50, 0x10A58}, {0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6},
{0x10AEB, 0x10AF6}, {0x10B00, 0x10B35}, {0x10B39, 0x10B55},
{0x10B58, 0x10B72}, {0x10B78, 0x10B91}, {0x10B99, 0x10B9C},
{0x10BA9, 0x10BAF}, {0x10C00, 0x10C48}, {0x10C80, 0x10CB2},
{0x10CC0, 0x10CF2}, {0x10CFA, 0x10D27}, {0x10D30, 0x10D39},
{0x10E60, 0x10E7E}, {0x10E80, 0x10EA9}, {0x10EAB, 0x10EAD},
{0x10EB0, 0x10EB1}, {0x10EFD, 0x10F27}, {0x10F30, 0x10F59},
{0x10F70, 0x10F89}, {0x10FB0, 0x10FCB}, {0x10FE0, 0x10FF6},
{0x11000, 0x1104D}, {0x11052, 0x11075}, {0x1107F, 0x110C2},
{0x110CD, 0x110CD}, {0x110D0, 0x110E8}, {0x110F0, 0x110F9},
{0x11100, 0x11134}, {0x11136, 0x11147}, {0x11150, 0x11176},
{0x11180, 0x111DF}, {0x111E1, 0x111F4}, {0x11200, 0x11211},
{0x11213, 0x11241}, {0x11280, 0x11286}, {0x11288, 0x11288},
{0x1128A, 0x1128D}, {0x1128F, 0x1129D}, {0x1129F, 0x112A9},
{0x112B0, 0x112EA}, {0x112F0, 0x112F9}, {0x11300, 0x11303},
{0x11305, 0x1130C}, {0x1130F, 0x11310}, {0x11313, 0x11328},
{0x1132A, 0x11330}, {0x11332, 0x11333}, {0x11335, 0x11339},
{0x1133B, 0x11344}, {0x11347, 0x11348}, {0x1134B, 0x1134D},
{0x11350, 0x11350}, {0x11357, 0x11357}, {0x1135D, 0x11363},
{0x11366, 0x1136C}, {0x11370, 0x11374}, {0x11400, 0x1145B},
{0x1145D, 0x11461}, {0x11480, 0x114C7}, {0x114D0, 0x114D9},
{0x11580, 0x115B5}, {0x115B8, 0x115DD}, {0x11600, 0x11644},
{0x11650, 0x11659}, {0x11660, 0x1166C}, {0x11680, 0x116B9},
{0x116C0, 0x116C9}, {0x11700, 0x1171A}, {0x1171D, 0x1172B},
{0x11730, 0x11746}, {0x11800, 0x1183B}, {0x118A0, 0x118F2},
{0x118FF, 0x11906}, {0x11909, 0x11909}, {0x1190C, 0x11913},
{0x11915, 0x11916}, {0x11918, 0x11935}, {0x11937, 0x11938},
{0x1193B, 0x11946}, {0x11950, 0x11959}, {0x119A0, 0x119A7},
{0x119AA, 0x119D7}, {0x119DA, 0x119E4}, {0x11A00, 0x11A47},
{0x11A50, 0x11AA2}, {0x11AB0, 0x11AF8}, {0x11B00, 0x11B09},
{0x11C00, 0x11C08}, {0x11C0A, 0x11C36}, {0x11C38, 0x11C45},
{0x11C50, 0x11C6C}, {0x11C70, 0x11C8F}, {0x11C92, 0x11CA7},
{0x11CA9, 0x11CB6}, {0x11D00, 0x11D06}, {0x11D08, 0x11D09},
@@ -352,30 +357,36 @@ var neutral = table{
{0x11D3F, 0x11D47}, {0x11D50, 0x11D59}, {0x11D60, 0x11D65},
{0x11D67, 0x11D68}, {0x11D6A, 0x11D8E}, {0x11D90, 0x11D91},
{0x11D93, 0x11D98}, {0x11DA0, 0x11DA9}, {0x11EE0, 0x11EF8},
{0x11F00, 0x11F10}, {0x11F12, 0x11F3A}, {0x11F3E, 0x11F59},
{0x11FB0, 0x11FB0}, {0x11FC0, 0x11FF1}, {0x11FFF, 0x12399},
{0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543},
{0x13000, 0x1342E}, {0x13430, 0x13438}, {0x14400, 0x14646},
{0x12F90, 0x12FF2}, {0x13000, 0x13455}, {0x14400, 0x14646},
{0x16800, 0x16A38}, {0x16A40, 0x16A5E}, {0x16A60, 0x16A69},
{0x16A6E, 0x16A6F}, {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF5},
{0x16B00, 0x16B45}, {0x16B50, 0x16B59}, {0x16B5B, 0x16B61},
{0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, {0x16E40, 0x16E9A},
{0x16F00, 0x16F4A}, {0x16F4F, 0x16F87}, {0x16F8F, 0x16F9F},
{0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, {0x1BC80, 0x1BC88},
{0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3}, {0x1D000, 0x1D0F5},
{0x1D100, 0x1D126}, {0x1D129, 0x1D1E8}, {0x1D200, 0x1D245},
{0x1D2E0, 0x1D2F3}, {0x1D300, 0x1D356}, {0x1D360, 0x1D378},
{0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F},
{0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC},
{0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3},
{0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514},
{0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E},
{0x1D540, 0x1D544}, {0x1D546, 0x1D546}, {0x1D54A, 0x1D550},
{0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B},
{0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006},
{0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024},
{0x1E026, 0x1E02A}, {0x1E100, 0x1E12C}, {0x1E130, 0x1E13D},
{0x1E140, 0x1E149}, {0x1E14E, 0x1E14F}, {0x1E2C0, 0x1E2F9},
{0x1E2FF, 0x1E2FF}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6},
{0x16A6E, 0x16ABE}, {0x16AC0, 0x16AC9}, {0x16AD0, 0x16AED},
{0x16AF0, 0x16AF5}, {0x16B00, 0x16B45}, {0x16B50, 0x16B59},
{0x16B5B, 0x16B61}, {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F},
{0x16E40, 0x16E9A}, {0x16F00, 0x16F4A}, {0x16F4F, 0x16F87},
{0x16F8F, 0x16F9F}, {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C},
{0x1BC80, 0x1BC88}, {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3},
{0x1CF00, 0x1CF2D}, {0x1CF30, 0x1CF46}, {0x1CF50, 0x1CFC3},
{0x1D000, 0x1D0F5}, {0x1D100, 0x1D126}, {0x1D129, 0x1D1EA},
{0x1D200, 0x1D245}, {0x1D2C0, 0x1D2D3}, {0x1D2E0, 0x1D2F3},
{0x1D300, 0x1D356}, {0x1D360, 0x1D378}, {0x1D400, 0x1D454},
{0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F}, {0x1D4A2, 0x1D4A2},
{0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC}, {0x1D4AE, 0x1D4B9},
{0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3}, {0x1D4C5, 0x1D505},
{0x1D507, 0x1D50A}, {0x1D50D, 0x1D514}, {0x1D516, 0x1D51C},
{0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E}, {0x1D540, 0x1D544},
{0x1D546, 0x1D546}, {0x1D54A, 0x1D550}, {0x1D552, 0x1D6A5},
{0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B}, {0x1DA9B, 0x1DA9F},
{0x1DAA1, 0x1DAAF}, {0x1DF00, 0x1DF1E}, {0x1DF25, 0x1DF2A},
{0x1E000, 0x1E006}, {0x1E008, 0x1E018}, {0x1E01B, 0x1E021},
{0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, {0x1E030, 0x1E06D},
{0x1E08F, 0x1E08F}, {0x1E100, 0x1E12C}, {0x1E130, 0x1E13D},
{0x1E140, 0x1E149}, {0x1E14E, 0x1E14F}, {0x1E290, 0x1E2AE},
{0x1E2C0, 0x1E2F9}, {0x1E2FF, 0x1E2FF}, {0x1E4D0, 0x1E4F9},
{0x1E7E0, 0x1E7E6}, {0x1E7E8, 0x1E7EB}, {0x1E7ED, 0x1E7EE},
{0x1E7F0, 0x1E7FE}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6},
{0x1E900, 0x1E94B}, {0x1E950, 0x1E959}, {0x1E95E, 0x1E95F},
{0x1EC71, 0x1ECB4}, {0x1ED01, 0x1ED3D}, {0x1EE00, 0x1EE03},
{0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, {0x1EE24, 0x1EE24},
@@ -400,8 +411,8 @@ var neutral = table{
{0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594},
{0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F},
{0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6D3, 0x1F6D4},
{0x1F6E0, 0x1F6EA}, {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773},
{0x1F780, 0x1F7D8}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847},
{0x1F6E0, 0x1F6EA}, {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F776},
{0x1F77B, 0x1F7D9}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847},
{0x1F850, 0x1F859}, {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD},
{0x1F8B0, 0x1F8B1}, {0x1F900, 0x1F90B}, {0x1F93B, 0x1F93B},
{0x1F946, 0x1F946}, {0x1FA00, 0x1FA53}, {0x1FA60, 0x1FA6D},

View File

@@ -7,10 +7,13 @@ import (
"time"
)
type CompareType int
// Deprecated: CompareType has only ever been for internal use and has accidentally been published since v1.6.0. Do not use it.
type CompareType = compareResult
type compareResult int
const (
compareLess CompareType = iota - 1
compareLess compareResult = iota - 1
compareEqual
compareGreater
)
@@ -39,7 +42,7 @@ var (
bytesType = reflect.TypeOf([]byte{})
)
func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
func compare(obj1, obj2 interface{}, kind reflect.Kind) (compareResult, bool) {
obj1Value := reflect.ValueOf(obj1)
obj2Value := reflect.ValueOf(obj2)
@@ -325,7 +328,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
}
return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
if timeObj1.Before(timeObj2) {
return compareLess, true
}
if timeObj1.Equal(timeObj2) {
return compareEqual, true
}
return compareGreater, true
}
case reflect.Slice:
{
@@ -345,7 +354,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte)
}
return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
return compareResult(bytes.Compare(bytesObj1, bytesObj2)), true
}
case reflect.Uintptr:
{
@@ -381,7 +390,7 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
if h, ok := t.(tHelper); ok {
h.Helper()
}
return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
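
The rename from CompareType to the unexported compareResult leaves the public behavior unchanged, so existing call sites keep working. A hedged example of the time.Time path shown above, which now compares via Before/Equal instead of converting to UnixNano and is therefore exact even outside the roughly 1678-2262 range where int64 nanoseconds overflow:

    package sketch_test

    import (
        "testing"
        "time"

        "github.com/stretchr/testify/assert"
    )

    func TestGreaterTimeSketch(t *testing.T) {
        far := time.Date(3000, 1, 1, 0, 0, 0, 0, time.UTC) // beyond the UnixNano range
        assert.Greater(t, far.Add(time.Hour), far)         // passes, and is now exact
    }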
// GreaterOrEqual asserts that the first element is greater than or equal to the second
@@ -394,7 +403,7 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
if h, ok := t.(tHelper); ok {
h.Helper()
}
return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
// Less asserts that the first element is less than the second
@@ -406,7 +415,7 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
if h, ok := t.(tHelper); ok {
h.Helper()
}
return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
// LessOrEqual asserts that the first element is less than or equal to the second
@@ -419,7 +428,7 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
if h, ok := t.(tHelper); ok {
h.Helper()
}
return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}
// Positive asserts that the specified element is positive
@@ -431,7 +440,7 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
h.Helper()
}
zero := reflect.Zero(reflect.TypeOf(e))
return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
}
// Negative asserts that the specified element is negative
@@ -443,10 +452,10 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
h.Helper()
}
zero := reflect.Zero(reflect.TypeOf(e))
return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...)
}
func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
@@ -469,7 +478,7 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare
return true
}
func containsValue(values []CompareType, value CompareType) bool {
func containsValue(values []compareResult, value compareResult) bool {
for _, v := range values {
if v == value {
return true

View File

@@ -104,8 +104,8 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{},
return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
// EqualValuesf asserts that two objects are equal or convertible to the larger
// type and equal.
//
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
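
To make the reworded contract concrete, a small sketch (the test name is illustrative; only the assert calls matter):

    package sketch_test

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestEqualValuesSketch(t *testing.T) {
        assert.EqualValues(t, uint32(123), int32(123)) // passes: equal after conversion
        // assert.Equal(t, uint32(123), int32(123))    // would fail: types must match exactly
    }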
@@ -186,7 +186,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick
// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -568,6 +568,23 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a
return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
}
// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
// the number of appearances of each of them in both lists should not match.
// This is an inverse of ElementsMatch.
//
// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false
//
// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true
//
// assert.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true
func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
}
// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
@@ -604,7 +621,16 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s
return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// NotErrorIsf asserts that at none of the errors in err's chain matches target.
// NotErrorAsf asserts that none of the errors in err's chain matches target,
// but if so, sets target to that error value.
func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
}
// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {

View File

@@ -186,8 +186,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface
return EqualExportedValuesf(a.t, expected, actual, msg, args...)
}
// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
// EqualValues asserts that two objects are equal or convertible to the larger
// type and equal.
//
// a.EqualValues(uint32(123), int32(123))
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
@@ -197,8 +197,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
return EqualValues(a.t, expected, actual, msgAndArgs...)
}
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
// EqualValuesf asserts that two objects are equal or convertible to the larger
// type and equal.
//
// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
@@ -336,7 +336,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti
// a.EventuallyWithT(func(c *assert.CollectT) {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -361,7 +361,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor
// a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1128,6 +1128,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin
return NotContainsf(a.t, s, contains, msg, args...)
}
// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
// the number of appearances of each of them in both lists should not match.
// This is an inverse of ElementsMatch.
//
// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false
//
// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true
//
// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true
func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NotElementsMatch(a.t, listA, listB, msgAndArgs...)
}
// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
// the number of appearances of each of them in both lists should not match.
// This is an inverse of ElementsMatch.
//
// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false
//
// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true
//
// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true
func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NotElementsMatchf(a.t, listA, listB, msg, args...)
}
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
@@ -1200,7 +1234,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str
return NotEqualf(a.t, expected, actual, msg, args...)
}
// NotErrorIs asserts that at none of the errors in err's chain matches target.
// NotErrorAs asserts that none of the errors in err's chain matches target,
// but if so, sets target to that error value.
func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NotErrorAs(a.t, err, target, msgAndArgs...)
}
// NotErrorAsf asserts that none of the errors in err's chain matches target,
// but if so, sets target to that error value.
func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NotErrorAsf(a.t, err, target, msg, args...)
}
// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
@@ -1209,7 +1261,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface
return NotErrorIs(a.t, err, target, msgAndArgs...)
}
// NotErrorIsf asserts that at none of the errors in err's chain matches target.
// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {

View File

@@ -6,7 +6,7 @@ import (
)
// isOrdered checks that collection contains orderable elements.
func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool {
objKind := reflect.TypeOf(object).Kind()
if objKind != reflect.Slice && objKind != reflect.Array {
return false
@@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT
// assert.IsIncreasing(t, []float{1, 2})
// assert.IsIncreasing(t, []string{"a", "b"})
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
return isOrdered(t, object, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
// IsNonIncreasing asserts that the collection is not increasing
@@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
// assert.IsNonIncreasing(t, []float{2, 1})
// assert.IsNonIncreasing(t, []string{"b", "a"})
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
return isOrdered(t, object, []compareResult{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
// IsDecreasing asserts that the collection is decreasing
@@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{})
// assert.IsDecreasing(t, []float{2, 1})
// assert.IsDecreasing(t, []string{"b", "a"})
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
return isOrdered(t, object, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
// IsNonDecreasing asserts that the collection is not decreasing
@@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
// assert.IsNonDecreasing(t, []float{1, 2})
// assert.IsNonDecreasing(t, []string{"a", "b"})
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
return isOrdered(t, object, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}

View File

@@ -19,7 +19,9 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/pmezard/go-difflib/difflib"
"gopkg.in/yaml.v3"
// Wrapper around gopkg.in/yaml.v3
"github.com/stretchr/testify/assert/yaml"
)
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
@@ -45,6 +47,10 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool
// for table driven tests.
type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
// PanicAssertionFunc is a common function prototype when validating a panic value. Can be useful
// for table driven tests.
type PanicAssertionFunc = func(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool
// Comparison is a custom function that returns true on success and false on failure
type Comparison func() (success bool)
@@ -496,7 +502,13 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b
h.Helper()
}
if !samePointers(expected, actual) {
same, ok := samePointers(expected, actual)
if !ok {
return Fail(t, "Both arguments must be pointers", msgAndArgs...)
}
if !same {
// both are pointers but not the same type & pointing to the same address
return Fail(t, fmt.Sprintf("Not same: \n"+
"expected: %p %#v\n"+
"actual : %p %#v", expected, expected, actual, actual), msgAndArgs...)
@@ -516,7 +528,13 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
h.Helper()
}
if samePointers(expected, actual) {
same, ok := samePointers(expected, actual)
if !ok {
// fails when the arguments are not pointers
return !(Fail(t, "Both arguments must be pointers", msgAndArgs...))
}
if same {
return Fail(t, fmt.Sprintf(
"Expected and actual point to the same object: %p %#v",
expected, expected), msgAndArgs...)
@@ -524,21 +542,23 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
return true
}
// samePointers compares two generic interface objects and returns whether
// they point to the same object
func samePointers(first, second interface{}) bool {
// samePointers checks if two generic interface objects are pointers of the same
// type pointing to the same object. It returns two values: same indicating if
// they are the same type and point to the same object, and ok indicating that
// both inputs are pointers.
func samePointers(first, second interface{}) (same bool, ok bool) {
firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
return false
return false, false // not both are pointers
}
firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
if firstType != secondType {
return false
return false, true // both are pointers, but of different types
}
// compare pointer addresses
return first == second
return first == second, true
}
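
A sketch of the resulting Same/NotSame semantics; the notable behavioral fix is that non-pointer arguments now fail NotSame too, where they previously passed silently:

    package sketch_test

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestSameSketch(t *testing.T) {
        x, y := 1, 1
        assert.Same(t, &x, &x)    // passes: identical pointer
        assert.NotSame(t, &x, &y) // passes: distinct addresses
        // assert.NotSame(t, 1, 1) // now fails with "Both arguments must be
        // pointers"; before this change it passed silently
    }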
// formatUnequalValues takes two values of arbitrary types and returns string
@@ -572,8 +592,8 @@ func truncatingFormat(data interface{}) string {
return value
}
// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
// EqualValues asserts that two objects are equal or convertible to the larger
// type and equal.
//
// assert.EqualValues(t, uint32(123), int32(123))
func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
@@ -615,21 +635,6 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs ..
return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
}
if aType.Kind() == reflect.Ptr {
aType = aType.Elem()
}
if bType.Kind() == reflect.Ptr {
bType = bType.Elem()
}
if aType.Kind() != reflect.Struct {
return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...)
}
if bType.Kind() != reflect.Struct {
return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...)
}
expected = copyExportedFields(expected)
actual = copyExportedFields(actual)
@@ -1170,6 +1175,39 @@ func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) stri
return msg.String()
}
// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
// the number of appearances of each of them in both lists should not match.
// This is an inverse of ElementsMatch.
//
// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false
//
// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true
//
// assert.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true
func NotElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if isEmpty(listA) && isEmpty(listB) {
return Fail(t, "listA and listB contain the same elements", msgAndArgs)
}
if !isList(t, listA, msgAndArgs...) {
return Fail(t, "listA is not a list type", msgAndArgs...)
}
if !isList(t, listB, msgAndArgs...) {
return Fail(t, "listB is not a list type", msgAndArgs...)
}
extraA, extraB := diffLists(listA, listB)
if len(extraA) == 0 && len(extraB) == 0 {
return Fail(t, "listA and listB contain the same elements", msgAndArgs)
}
return true
}
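
Usage sketch for the new assertion (order is ignored, but duplicate counts are not):

    package sketch_test

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestNotElementsMatchSketch(t *testing.T) {
        assert.NotElementsMatch(t, []int{1, 1, 2, 3}, []int{1, 2, 3}) // passes: duplicate counts differ
        assert.NotElementsMatch(t, []int{1, 2, 3}, []int{1, 2, 4})    // passes: elements differ
        // assert.NotElementsMatch(t, []int{1, 2, 3}, []int{3, 2, 1}) // would fail: same multiset
    }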
// Condition uses a Comparison to assert a complex condition.
func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -1488,6 +1526,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd
if err != nil {
return Fail(t, err.Error(), msgAndArgs...)
}
if math.IsNaN(actualEpsilon) {
return Fail(t, "relative error is NaN", msgAndArgs...)
}
if actualEpsilon > epsilon {
return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
" < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...)
@@ -1611,7 +1652,6 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in
// matchRegexp return true if a specified regexp matches a string.
func matchRegexp(rx interface{}, str interface{}) bool {
var r *regexp.Regexp
if rr, ok := rx.(*regexp.Regexp); ok {
r = rr
@@ -1619,7 +1659,14 @@ func matchRegexp(rx interface{}, str interface{}) bool {
r = regexp.MustCompile(fmt.Sprint(rx))
}
return (r.FindStringIndex(fmt.Sprint(str)) != nil)
switch v := str.(type) {
case []byte:
return r.Match(v)
case string:
return r.MatchString(v)
default:
return r.MatchString(fmt.Sprint(v))
}
}
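
matchRegexp backs assert.Regexp and assert.NotRegexp; the change matters for []byte inputs, which fmt.Sprint used to render as a decimal slice rather than as text:

    package sketch_test

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestRegexpBytesSketch(t *testing.T) {
        assert.Regexp(t, "^abc$", []byte("abc")) // passes now; previously the bytes
        // were stringified as "[97 98 99]", so the anchored pattern never matched
    }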
@@ -1872,7 +1919,7 @@ var spewConfigStringerEnabled = spew.ConfigState{
MaxDepth: 10,
}
type tHelper interface {
type tHelper = interface {
Helper()
}
@@ -1911,6 +1958,9 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
// CollectT implements the TestingT interface and collects all errors.
type CollectT struct {
// A slice of errors. Non-nil slice denotes a failure.
// If it's non-nil but len(c.errors) == 0, this is also a failure
// obtained by direct c.FailNow() call.
errors []error
}
@@ -1919,9 +1969,10 @@ func (c *CollectT) Errorf(format string, args ...interface{}) {
c.errors = append(c.errors, fmt.Errorf(format, args...))
}
// FailNow panics.
func (*CollectT) FailNow() {
panic("Assertion failed")
// FailNow stops execution by calling runtime.Goexit.
func (c *CollectT) FailNow() {
c.fail()
runtime.Goexit()
}
// Deprecated: That was a method for internal usage that should not have been published. Now just panics.
@@ -1934,6 +1985,16 @@ func (*CollectT) Copy(TestingT) {
panic("Copy() is deprecated")
}
func (c *CollectT) fail() {
if !c.failed() {
c.errors = []error{} // Make it non-nil to mark a failure.
}
}
func (c *CollectT) failed() bool {
return c.errors != nil
}
// EventuallyWithT asserts that given condition will be met in waitFor time,
// periodically checking target function each tick. In contrast to Eventually,
// it supplies a CollectT to the condition function, so that the condition
@@ -1951,14 +2012,14 @@ func (*CollectT) Copy(TestingT) {
// assert.EventuallyWithT(t, func(c *assert.CollectT) {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
var lastFinishedTickErrs []error
ch := make(chan []error, 1)
ch := make(chan *CollectT, 1)
timer := time.NewTimer(waitFor)
defer timer.Stop()
@@ -1978,16 +2039,16 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time
go func() {
collect := new(CollectT)
defer func() {
ch <- collect.errors
ch <- collect
}()
condition(collect)
}()
case errs := <-ch:
if len(errs) == 0 {
case collect := <-ch:
if !collect.failed() {
return true
}
// Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached.
lastFinishedTickErrs = errs
lastFinishedTickErrs = collect.errors
tick = ticker.C
}
}
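
Putting the pieces together, a usage sketch; note the argument order (waitFor, then tick) that the doc comments above were corrected to show, and that FailNow inside the condition now exits the tick's goroutine via runtime.Goexit instead of panicking:

    package sketch_test

    import (
        "testing"
        "time"

        "github.com/stretchr/testify/assert"
    )

    func TestEventuallyWithTSketch(t *testing.T) {
        started := time.Now()
        assert.EventuallyWithT(t, func(c *assert.CollectT) {
            // Failures here only fail the current tick; the last finished
            // tick's errors are copied to t if waitFor expires.
            assert.Greater(c, time.Since(started), 50*time.Millisecond)
        }, 10*time.Second, 10*time.Millisecond) // waitFor, then tick
    }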
@@ -2049,7 +2110,7 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
), msgAndArgs...)
}
// NotErrorIs asserts that at none of the errors in err's chain matches target.
// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -2090,6 +2151,24 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{
), msgAndArgs...)
}
// NotErrorAs asserts that none of the errors in err's chain matches target,
// but if so, sets target to that error value.
func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if !errors.As(err, target) {
return true
}
chain := buildErrorChainString(err)
return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
"found: %q\n"+
"in chain: %s", target, chain,
), msgAndArgs...)
}
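
Usage sketch for the new NotErrorAs, the mirror of ErrorAs:

    package sketch_test

    import (
        "errors"
        "io/fs"
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestNotErrorAsSketch(t *testing.T) {
        var pathErr *fs.PathError
        err := errors.New("plain error")
        assert.NotErrorAs(t, err, &pathErr) // passes: no *fs.PathError in the chain
    }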
func buildErrorChainString(err error) string {
if err == nil {
return ""

View File

@@ -0,0 +1,25 @@
//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default
// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default
// Package yaml is an implementation of YAML functions that calls a pluggable implementation.
//
// This implementation is selected with the testify_yaml_custom build tag.
//
// go test -tags testify_yaml_custom
//
// This implementation can be used at build time to replace the default implementation
// to avoid linking with [gopkg.in/yaml.v3].
//
// In your test package:
//
// import assertYaml "github.com/stretchr/testify/assert/yaml"
//
// func init() {
// assertYaml.Unmarshal = func (in []byte, out interface{}) error {
// // ...
// return nil
// }
// }
package yaml
var Unmarshal func(in []byte, out interface{}) error

View File

@@ -0,0 +1,37 @@
//go:build !testify_yaml_fail && !testify_yaml_custom
// +build !testify_yaml_fail,!testify_yaml_custom
// Package yaml is just an indirection to handle YAML deserialization.
//
// This package is just an indirection that allows the builder to override the
// indirection with an alternative implementation of this package that uses
// another implementation of YAML deserialization. This allows either not
// using YAML deserialization at all, or using another implementation than
// [gopkg.in/yaml.v3] (for example for license compatibility reasons, see [PR #1120]).
//
// Alternative implementations are selected using build tags:
//
// - testify_yaml_fail: [Unmarshal] always fails with an error
// - testify_yaml_custom: [Unmarshal] is a variable. Caller must initialize it
// before calling any of [github.com/stretchr/testify/assert.YAMLEq] or
// [github.com/stretchr/testify/assert.YAMLEqf].
//
// Usage:
//
// go test -tags testify_yaml_fail
//
// You can check with "go list" which implementation is linked:
//
// go list -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
// go list -tags testify_yaml_fail -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
// go list -tags testify_yaml_custom -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
//
// [PR #1120]: https://github.com/stretchr/testify/pull/1120
package yaml
import goyaml "gopkg.in/yaml.v3"
// Unmarshal is just a wrapper of [gopkg.in/yaml.v3.Unmarshal].
func Unmarshal(in []byte, out interface{}) error {
return goyaml.Unmarshal(in, out)
}

View File

@@ -0,0 +1,18 @@
//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default
// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default
// Package yaml is an implementation of YAML functions that always fail.
//
// This implementation can be used at build time to replace the default implementation
// to avoid linking with [gopkg.in/yaml.v3]:
//
// go test -tags testify_yaml_fail
package yaml
import "errors"
var errNotImplemented = errors.New("YAML functions are not available (see https://pkg.go.dev/github.com/stretchr/testify/assert/yaml)")
func Unmarshal([]byte, interface{}) error {
return errNotImplemented
}

File diff suppressed because it is too large

View File

@@ -1,4 +1,4 @@
{{.Comment}}
{{ replace .Comment "assert." "require."}}
func {{.DocInfo.Name}}(t TestingT, {{.Params}}) {
if h, ok := t.(tHelper); ok { h.Helper() }
if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return }

View File

@@ -187,8 +187,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface
EqualExportedValuesf(a.t, expected, actual, msg, args...)
}
// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
// EqualValues asserts that two objects are equal or convertible to the larger
// type and equal.
//
// a.EqualValues(uint32(123), int32(123))
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
@@ -198,8 +198,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
EqualValues(a.t, expected, actual, msgAndArgs...)
}
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
// EqualValuesf asserts that two objects are equal or convertible to the larger
// type and equal.
//
// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
@@ -337,7 +337,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti
// a.EventuallyWithT(func(c *assert.CollectT) {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -362,7 +362,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), w
// a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func (a *Assertions) EventuallyWithTf(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1129,6 +1129,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin
NotContainsf(a.t, s, contains, msg, args...)
}
// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
// the number of appearances of each of them in both lists should not match.
// This is an inverse of ElementsMatch.
//
// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false
//
// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true
//
// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true
func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
NotElementsMatch(a.t, listA, listB, msgAndArgs...)
}
// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
// the number of appearances of each of them in both lists should not match.
// This is an inverse of ElementsMatch.
//
// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false
//
// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true
//
// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true
func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
NotElementsMatchf(a.t, listA, listB, msg, args...)
}
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
@@ -1201,7 +1235,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str
NotEqualf(a.t, expected, actual, msg, args...)
}
// NotErrorIs asserts that at none of the errors in err's chain matches target.
// NotErrorAs asserts that none of the errors in err's chain matches target,
// but if so, sets target to that error value.
func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
NotErrorAs(a.t, err, target, msgAndArgs...)
}
// NotErrorAsf asserts that none of the errors in err's chain matches target,
// but if so, sets target to that error value.
func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
NotErrorAsf(a.t, err, target, msg, args...)
}
// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
@@ -1210,7 +1262,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface
NotErrorIs(a.t, err, target, msgAndArgs...)
}
// NotErrorIsf asserts that at none of the errors in err's chain matches target.
// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
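
For reference, a minimal sketch of the new assertion pair in a test, assuming a testify release that ships NotElementsMatch (v1.10+):

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Both assertions pass: the first because the element counts differ
// (an extra 1 on the left), the second because order is ignored.
func TestNotElementsMatch(t *testing.T) {
	assert.NotElementsMatch(t, []int{1, 1, 2, 3}, []int{1, 2, 3})
	assert.ElementsMatch(t, []int{3, 2, 1}, []int{1, 2, 3})
}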


@@ -6,7 +6,7 @@ type TestingT interface {
FailNow()
}
type tHelper interface {
type tHelper = interface {
Helper()
}


@@ -5,6 +5,8 @@
// or individual tests (depending on which interface(s) you
// implement).
//
// The suite package does not support parallel tests. See [issue 934].
//
// A testing suite is usually built by first extending the built-in
// suite functionality from suite.Suite in testify. Alternatively,
// you could reproduce that logic on your own if you wanted (you
@@ -63,4 +65,6 @@
// func TestExampleTestSuite(t *testing.T) {
// suite.Run(t, new(ExampleTestSuite))
// }
//
// [issue 934]: https://github.com/stretchr/testify/issues/934
package suite


@@ -173,13 +173,12 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) {
}
// EnableTriggerComplete enables triggering complete event. It's effective
// only for bars which were constructed with `total <= 0` and after total
// has been set with `(*Bar).SetTotal(int64, false)`. If `current >= total`
// only for bars which were constructed with `total <= 0`. If `current >= total`
// at the moment of call, complete event is triggered right away.
func (b *Bar) EnableTriggerComplete() {
select {
case b.operateState <- func(s *bState) {
if s.triggerComplete || s.total <= 0 {
if s.triggerComplete {
return
}
if s.current >= s.total {
@@ -196,10 +195,10 @@ func (b *Bar) EnableTriggerComplete() {
// SetTotal sets total to an arbitrary value. It's effective only for bar
// which was constructed with `total <= 0`. Setting total to negative value
// is equivalent to `(*Bar).SetTotal((*Bar).Current(), bool)` but faster. If
// triggerCompletion is true, total value is set to current and complete
// event is triggered right away.
func (b *Bar) SetTotal(total int64, triggerCompletion bool) {
// is equivalent to `(*Bar).SetTotal((*Bar).Current(), bool)` but faster.
// If `complete` is true, the complete event is triggered right away.
// Calling `(*Bar).EnableTriggerComplete` makes this one non-operational.
func (b *Bar) SetTotal(total int64, complete bool) {
select {
case b.operateState <- func(s *bState) {
if s.triggerComplete {
@@ -210,7 +209,7 @@ func (b *Bar) SetTotal(total int64, triggerCompletion bool) {
} else {
s.total = total
}
if triggerCompletion {
if complete {
s.current = s.total
s.completed = true
b.triggerCompletion(s)
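
A minimal sketch of the revised contract, assuming the mpb/v8 API vendored here: construct the bar with total <= 0, raise the total as data arrives, then arm completion once the real total is known.

package main

import (
	"time"

	"github.com/vbauerster/mpb/v8"
)

func main() {
	p := mpb.New()
	bar := p.AddBar(0) // total unknown up front, so no auto-complete

	for i := 0; i < 10; i++ {
		bar.SetTotal(int64((i+1)*10), false) // keep raising the total
		bar.IncrBy(10)
		time.Sleep(50 * time.Millisecond)
	}
	// Arm the complete trigger; current >= total, so it fires right away.
	bar.EnableTriggerComplete()
	p.Wait()
}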

vendor/golang.org/x/crypto/LICENSE generated vendored

@@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego
//go:build (!amd64 && !ppc64le && !ppc64 && !s390x) || !gc || purego
package poly1305


@@ -1,108 +1,93 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by command: go run sum_amd64_asm.go -out ../sum_amd64.s -pkg poly1305. DO NOT EDIT.
//go:build gc && !purego
#include "textflag.h"
#define POLY1305_ADD(msg, h0, h1, h2) \
ADDQ 0(msg), h0; \
ADCQ 8(msg), h1; \
ADCQ $1, h2; \
LEAQ 16(msg), msg
#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \
MOVQ r0, AX; \
MULQ h0; \
MOVQ AX, t0; \
MOVQ DX, t1; \
MOVQ r0, AX; \
MULQ h1; \
ADDQ AX, t1; \
ADCQ $0, DX; \
MOVQ r0, t2; \
IMULQ h2, t2; \
ADDQ DX, t2; \
\
MOVQ r1, AX; \
MULQ h0; \
ADDQ AX, t1; \
ADCQ $0, DX; \
MOVQ DX, h0; \
MOVQ r1, t3; \
IMULQ h2, t3; \
MOVQ r1, AX; \
MULQ h1; \
ADDQ AX, t2; \
ADCQ DX, t3; \
ADDQ h0, t2; \
ADCQ $0, t3; \
\
MOVQ t0, h0; \
MOVQ t1, h1; \
MOVQ t2, h2; \
ANDQ $3, h2; \
MOVQ t2, t0; \
ANDQ $0xFFFFFFFFFFFFFFFC, t0; \
ADDQ t0, h0; \
ADCQ t3, h1; \
ADCQ $0, h2; \
SHRQ $2, t3, t2; \
SHRQ $2, t3; \
ADDQ t2, h0; \
ADCQ t3, h1; \
ADCQ $0, h2
// func update(state *[7]uint64, msg []byte)
// func update(state *macState, msg []byte)
TEXT ·update(SB), $0-32
MOVQ state+0(FP), DI
MOVQ msg_base+8(FP), SI
MOVQ msg_len+16(FP), R15
MOVQ 0(DI), R8 // h0
MOVQ 8(DI), R9 // h1
MOVQ 16(DI), R10 // h2
MOVQ 24(DI), R11 // r0
MOVQ 32(DI), R12 // r1
CMPQ R15, $16
MOVQ (DI), R8
MOVQ 8(DI), R9
MOVQ 16(DI), R10
MOVQ 24(DI), R11
MOVQ 32(DI), R12
CMPQ R15, $0x10
JB bytes_between_0_and_15
loop:
POLY1305_ADD(SI, R8, R9, R10)
ADDQ (SI), R8
ADCQ 8(SI), R9
ADCQ $0x01, R10
LEAQ 16(SI), SI
multiply:
POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14)
SUBQ $16, R15
CMPQ R15, $16
JAE loop
MOVQ R11, AX
MULQ R8
MOVQ AX, BX
MOVQ DX, CX
MOVQ R11, AX
MULQ R9
ADDQ AX, CX
ADCQ $0x00, DX
MOVQ R11, R13
IMULQ R10, R13
ADDQ DX, R13
MOVQ R12, AX
MULQ R8
ADDQ AX, CX
ADCQ $0x00, DX
MOVQ DX, R8
MOVQ R12, R14
IMULQ R10, R14
MOVQ R12, AX
MULQ R9
ADDQ AX, R13
ADCQ DX, R14
ADDQ R8, R13
ADCQ $0x00, R14
MOVQ BX, R8
MOVQ CX, R9
MOVQ R13, R10
ANDQ $0x03, R10
MOVQ R13, BX
ANDQ $-4, BX
ADDQ BX, R8
ADCQ R14, R9
ADCQ $0x00, R10
SHRQ $0x02, R14, R13
SHRQ $0x02, R14
ADDQ R13, R8
ADCQ R14, R9
ADCQ $0x00, R10
SUBQ $0x10, R15
CMPQ R15, $0x10
JAE loop
bytes_between_0_and_15:
TESTQ R15, R15
JZ done
MOVQ $1, BX
MOVQ $0x00000001, BX
XORQ CX, CX
XORQ R13, R13
ADDQ R15, SI
flush_buffer:
SHLQ $8, BX, CX
SHLQ $8, BX
SHLQ $0x08, BX, CX
SHLQ $0x08, BX
MOVB -1(SI), R13
XORQ R13, BX
DECQ SI
DECQ R15
JNZ flush_buffer
ADDQ BX, R8
ADCQ CX, R9
ADCQ $0, R10
MOVQ $16, R15
ADCQ $0x00, R10
MOVQ $0x00000010, R15
JMP multiply
done:
MOVQ R8, 0(DI)
MOVQ R8, (DI)
MOVQ R9, 8(DI)
MOVQ R10, 16(DI)
RET
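
The regenerated assembly inlines what the old POLY1305_ADD macro spelled out. As a rough pure-Go illustration of that add-with-carry step (not the library's code; the real loop then multiplies the accumulator by r and reduces):

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// addBlock folds one 16-byte message block, plus the implicit high
// "1" bit, into the 130-bit accumulator held as three 64-bit limbs.
func addBlock(h0, h1, h2 uint64, msg []byte) (uint64, uint64, uint64) {
	m0 := binary.LittleEndian.Uint64(msg[0:8])
	m1 := binary.LittleEndian.Uint64(msg[8:16])
	var c uint64
	h0, c = bits.Add64(h0, m0, 0) // ADDQ (msg), h0
	h1, c = bits.Add64(h1, m1, c) // ADCQ 8(msg), h1
	h2, _ = bits.Add64(h2, 1, c)  // ADCQ $1, h2
	return h0, h1, h2
}

func main() {
	block := make([]byte, 16)
	block[0] = 0x42
	fmt.Println(addBlock(0, 0, 0, block)) // 66 0 1
}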


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc && !purego
//go:build gc && !purego && (ppc64 || ppc64le)
package poly1305


@@ -2,15 +2,25 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc && !purego
//go:build gc && !purego && (ppc64 || ppc64le)
#include "textflag.h"
// This was ported from the amd64 implementation.
#ifdef GOARCH_ppc64le
#define LE_MOVD MOVD
#define LE_MOVWZ MOVWZ
#define LE_MOVHZ MOVHZ
#else
#define LE_MOVD MOVDBR
#define LE_MOVWZ MOVWBR
#define LE_MOVHZ MOVHBR
#endif
#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \
MOVD (msg), t0; \
MOVD 8(msg), t1; \
LE_MOVD (msg)( R0), t0; \
LE_MOVD (msg)(R24), t1; \
MOVD $1, t2; \
ADDC t0, h0, h0; \
ADDE t1, h1, h1; \
@@ -50,10 +60,6 @@
ADDE t3, h1, h1; \
ADDZE h2
DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF
DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC
GLOBL ·poly1305Mask<>(SB), RODATA, $16
// func update(state *[7]uint64, msg []byte)
TEXT ·update(SB), $0-32
MOVD state+0(FP), R3
@@ -66,6 +72,8 @@ TEXT ·update(SB), $0-32
MOVD 24(R3), R11 // r0
MOVD 32(R3), R12 // r1
MOVD $8, R24
CMP R5, $16
BLT bytes_between_0_and_15
@@ -94,7 +102,7 @@ flush_buffer:
// Greater than 8 -- load the rightmost remaining bytes in msg
// and put into R17 (h1)
MOVD (R4)(R21), R17
LE_MOVD (R4)(R21), R17
MOVD $16, R22
// Find the offset to those bytes
@@ -118,7 +126,7 @@ just1:
BLT less8
// Exactly 8
MOVD (R4), R16
LE_MOVD (R4), R16
CMP R17, $0
@@ -133,7 +141,7 @@ less8:
MOVD $0, R22 // shift count
CMP R5, $4
BLT less4
MOVWZ (R4), R16
LE_MOVWZ (R4), R16
ADD $4, R4
ADD $-4, R5
MOVD $32, R22
@@ -141,7 +149,7 @@ less8:
less4:
CMP R5, $2
BLT less2
MOVHZ (R4), R21
LE_MOVHZ (R4), R21
SLD R22, R21, R21
OR R16, R21, R16
ADD $16, R22

File diff suppressed because it is too large


@@ -5,6 +5,10 @@
// Package sha3 implements the SHA-3 fixed-output-length hash functions and
// the SHAKE variable-output-length hash functions defined by FIPS-202.
//
// All types in this package also implement [encoding.BinaryMarshaler],
// [encoding.BinaryAppender] and [encoding.BinaryUnmarshaler] to marshal and
// unmarshal the internal state of the hash.
//
// Both types of hash function use the "sponge" construction and the Keccak
// permutation. For a detailed specification see http://keccak.noekeon.org/
//
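
A minimal sketch of the newly documented round-trip, assuming this vendored sha3 where the hash states implement the encoding interfaces:

package main

import (
	"encoding"
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	h1 := sha3.New256()
	h1.Write([]byte("hello "))

	// Snapshot the mid-stream state...
	blob, err := h1.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		panic(err)
	}

	// ...and resume hashing in a fresh instance.
	h2 := sha3.New256()
	if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(blob); err != nil {
		panic(err)
	}
	h2.Write([]byte("world"))
	fmt.Printf("%x\n", h2.Sum(nil)) // equals sha3.Sum256([]byte("hello world"))
}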


@@ -48,33 +48,52 @@ func init() {
crypto.RegisterHash(crypto.SHA3_512, New512)
}
const (
dsbyteSHA3 = 0b00000110
dsbyteKeccak = 0b00000001
dsbyteShake = 0b00011111
dsbyteCShake = 0b00000100
// rateK[c] is the rate in bytes for Keccak[c] where c is the capacity in
// bits. Given the sponge size is 1600 bits, the rate is 1600 - c bits.
rateK256 = (1600 - 256) / 8
rateK448 = (1600 - 448) / 8
rateK512 = (1600 - 512) / 8
rateK768 = (1600 - 768) / 8
rateK1024 = (1600 - 1024) / 8
)
func new224Generic() *state {
return &state{rate: 144, outputLen: 28, dsbyte: 0x06}
return &state{rate: rateK448, outputLen: 28, dsbyte: dsbyteSHA3}
}
func new256Generic() *state {
return &state{rate: 136, outputLen: 32, dsbyte: 0x06}
return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteSHA3}
}
func new384Generic() *state {
return &state{rate: 104, outputLen: 48, dsbyte: 0x06}
return &state{rate: rateK768, outputLen: 48, dsbyte: dsbyteSHA3}
}
func new512Generic() *state {
return &state{rate: 72, outputLen: 64, dsbyte: 0x06}
return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteSHA3}
}
// NewLegacyKeccak256 creates a new Keccak-256 hash.
//
// Only use this function if you require compatibility with an existing cryptosystem
// that uses non-standard padding. All other users should use New256 instead.
func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} }
func NewLegacyKeccak256() hash.Hash {
return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteKeccak}
}
// NewLegacyKeccak512 creates a new Keccak-512 hash.
//
// Only use this function if you require compatibility with an existing cryptosystem
// that uses non-standard padding. All other users should use New512 instead.
func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} }
func NewLegacyKeccak512() hash.Hash {
return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteKeccak}
}
// Sum224 returns the SHA3-224 digest of the data.
func Sum224(data []byte) (digest [28]byte) {
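
For quick reference, the named rates evaluate to the former magic numbers (sponge width 1600 bits, rate = width - capacity):

// rateK256  = (1600 -  256) / 8 = 168  (SHAKE128)
// rateK448  = (1600 -  448) / 8 = 144  (SHA3-224)
// rateK512  = (1600 -  512) / 8 = 136  (SHA3-256, SHAKE256, Keccak-256)
// rateK768  = (1600 -  768) / 8 = 104  (SHA3-384)
// rateK1024 = (1600 - 1024) / 8 =  72  (SHA3-512, Keccak-512)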

File diff suppressed because it is too large


@@ -4,6 +4,15 @@
package sha3
import (
"crypto/subtle"
"encoding/binary"
"errors"
"unsafe"
"golang.org/x/sys/cpu"
)
// spongeDirection indicates the direction bytes are flowing through the sponge.
type spongeDirection int
@@ -14,16 +23,13 @@ const (
spongeSqueezing
)
const (
// maxRate is the maximum size of the internal buffer. SHAKE-256
// currently needs the largest buffer.
maxRate = 168
)
type state struct {
// Generic sponge components.
a [25]uint64 // main state of the hash
rate int // the number of bytes of state to use
a [1600 / 8]byte // main state of the hash
// a[n:rate] is the buffer. If absorbing, it's the remaining space to XOR
// into before running the permutation. If squeezing, it's the remaining
// output to produce before running the permutation.
n, rate int
// dsbyte contains the "domain separation" bits and the first bit of
// the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
@@ -39,10 +45,6 @@ type state struct {
// Extendable-Output Functions (May 2014)"
dsbyte byte
i, n int // storage[i:n] is the buffer, i is only used while squeezing
storage [maxRate]byte
// Specific to SHA-3 and SHAKE.
outputLen int // the default output size in bytes
state spongeDirection // whether the sponge is absorbing or squeezing
}
@@ -61,7 +63,7 @@ func (d *state) Reset() {
d.a[i] = 0
}
d.state = spongeAbsorbing
d.i, d.n = 0, 0
d.n = 0
}
func (d *state) clone() *state {
@@ -69,22 +71,25 @@ func (d *state) clone() *state {
return &ret
}
// permute applies the KeccakF-1600 permutation. It handles
// any input-output buffering.
// permute applies the KeccakF-1600 permutation.
func (d *state) permute() {
switch d.state {
case spongeAbsorbing:
// If we're absorbing, we need to xor the input into the state
// before applying the permutation.
xorIn(d, d.storage[:d.rate])
d.n = 0
keccakF1600(&d.a)
case spongeSqueezing:
// If we're squeezing, we need to apply the permutation before
// copying more output.
keccakF1600(&d.a)
d.i = 0
copyOut(d, d.storage[:d.rate])
var a *[25]uint64
if cpu.IsBigEndian {
a = new([25]uint64)
for i := range a {
a[i] = binary.LittleEndian.Uint64(d.a[i*8:])
}
} else {
a = (*[25]uint64)(unsafe.Pointer(&d.a))
}
keccakF1600(a)
d.n = 0
if cpu.IsBigEndian {
for i := range a {
binary.LittleEndian.PutUint64(d.a[i*8:], a[i])
}
}
}
@@ -92,53 +97,36 @@ func (d *state) permute() {
// the multi-bitrate 10..1 padding rule, and permutes the state.
func (d *state) padAndPermute() {
// Pad with this instance's domain-separator bits. We know that there's
// at least one byte of space in d.buf because, if it were full,
// at least one byte of space in the sponge because, if it were full,
// permute would have been called to empty it. dsbyte also contains the
// first one bit for the padding. See the comment in the state struct.
d.storage[d.n] = d.dsbyte
d.n++
for d.n < d.rate {
d.storage[d.n] = 0
d.n++
}
d.a[d.n] ^= d.dsbyte
// This adds the final one bit for the padding. Because of the way that
// bits are numbered from the LSB upwards, the final bit is the MSB of
// the last byte.
d.storage[d.rate-1] ^= 0x80
d.a[d.rate-1] ^= 0x80
// Apply the permutation
d.permute()
d.state = spongeSqueezing
d.n = d.rate
copyOut(d, d.storage[:d.rate])
}
// Write absorbs more data into the hash's state. It panics if any
// output has already been read.
func (d *state) Write(p []byte) (written int, err error) {
func (d *state) Write(p []byte) (n int, err error) {
if d.state != spongeAbsorbing {
panic("sha3: Write after Read")
}
written = len(p)
n = len(p)
for len(p) > 0 {
if d.n == 0 && len(p) >= d.rate {
// The fast path; absorb a full "rate" bytes of input and apply the permutation.
xorIn(d, p[:d.rate])
p = p[d.rate:]
keccakF1600(&d.a)
} else {
// The slow path; buffer the input until we can fill the sponge, and then xor it in.
todo := d.rate - d.n
if todo > len(p) {
todo = len(p)
}
d.n += copy(d.storage[d.n:], p[:todo])
p = p[todo:]
x := subtle.XORBytes(d.a[d.n:d.rate], d.a[d.n:d.rate], p)
d.n += x
p = p[x:]
// If the sponge is full, apply the permutation.
if d.n == d.rate {
d.permute()
}
// If the sponge is full, apply the permutation.
if d.n == d.rate {
d.permute()
}
}
@@ -156,14 +144,14 @@ func (d *state) Read(out []byte) (n int, err error) {
// Now, do the squeezing.
for len(out) > 0 {
n := copy(out, d.storage[d.i:d.n])
d.i += n
out = out[n:]
// Apply the permutation if we've squeezed the sponge dry.
if d.i == d.rate {
if d.n == d.rate {
d.permute()
}
x := copy(out, d.a[d.n:d.rate])
d.n += x
out = out[x:]
}
return
@@ -183,3 +171,74 @@ func (d *state) Sum(in []byte) []byte {
dup.Read(hash)
return append(in, hash...)
}
const (
magicSHA3 = "sha\x08"
magicShake = "sha\x09"
magicCShake = "sha\x0a"
magicKeccak = "sha\x0b"
// magic || rate || main state || n || sponge direction
marshaledSize = len(magicSHA3) + 1 + 200 + 1 + 1
)
func (d *state) MarshalBinary() ([]byte, error) {
return d.AppendBinary(make([]byte, 0, marshaledSize))
}
func (d *state) AppendBinary(b []byte) ([]byte, error) {
switch d.dsbyte {
case dsbyteSHA3:
b = append(b, magicSHA3...)
case dsbyteShake:
b = append(b, magicShake...)
case dsbyteCShake:
b = append(b, magicCShake...)
case dsbyteKeccak:
b = append(b, magicKeccak...)
default:
panic("unknown dsbyte")
}
// rate is at most 168, and n is at most rate.
b = append(b, byte(d.rate))
b = append(b, d.a[:]...)
b = append(b, byte(d.n), byte(d.state))
return b, nil
}
func (d *state) UnmarshalBinary(b []byte) error {
if len(b) != marshaledSize {
return errors.New("sha3: invalid hash state")
}
magic := string(b[:len(magicSHA3)])
b = b[len(magicSHA3):]
switch {
case magic == magicSHA3 && d.dsbyte == dsbyteSHA3:
case magic == magicShake && d.dsbyte == dsbyteShake:
case magic == magicCShake && d.dsbyte == dsbyteCShake:
case magic == magicKeccak && d.dsbyte == dsbyteKeccak:
default:
return errors.New("sha3: invalid hash state identifier")
}
rate := int(b[0])
b = b[1:]
if rate != d.rate {
return errors.New("sha3: invalid hash state function")
}
copy(d.a[:], b)
b = b[len(d.a):]
n, state := int(b[0]), spongeDirection(b[1])
if n > d.rate {
return errors.New("sha3: invalid hash state")
}
d.n = n
if state != spongeAbsorbing && state != spongeSqueezing {
return errors.New("sha3: invalid hash state")
}
d.state = state
return nil
}


@@ -16,9 +16,12 @@ package sha3
// [2] https://doi.org/10.6028/NIST.SP.800-185
import (
"bytes"
"encoding/binary"
"errors"
"hash"
"io"
"math/bits"
)
// ShakeHash defines the interface to hash functions that support
@@ -50,44 +53,36 @@ type cshakeState struct {
initBlock []byte
}
// Consts for configuring initial SHA-3 state
const (
dsbyteShake = 0x1f
dsbyteCShake = 0x04
rate128 = 168
rate256 = 136
)
func bytepad(input []byte, w int) []byte {
// leftEncode always returns max 9 bytes
buf := make([]byte, 0, 9+len(input)+w)
buf = append(buf, leftEncode(uint64(w))...)
buf = append(buf, input...)
padlen := w - (len(buf) % w)
return append(buf, make([]byte, padlen)...)
func bytepad(data []byte, rate int) []byte {
out := make([]byte, 0, 9+len(data)+rate-1)
out = append(out, leftEncode(uint64(rate))...)
out = append(out, data...)
if padlen := rate - len(out)%rate; padlen < rate {
out = append(out, make([]byte, padlen)...)
}
return out
}
func leftEncode(value uint64) []byte {
var b [9]byte
binary.BigEndian.PutUint64(b[1:], value)
// Trim all but last leading zero bytes
i := byte(1)
for i < 8 && b[i] == 0 {
i++
func leftEncode(x uint64) []byte {
// Let n be the smallest positive integer for which 2^(8n) > x.
n := (bits.Len64(x) + 7) / 8
if n == 0 {
n = 1
}
// Prepend number of encoded bytes
b[i-1] = 9 - i
return b[i-1:]
// Return n || x with n as a byte and x as n bytes in big-endian order.
b := make([]byte, 9)
binary.BigEndian.PutUint64(b[1:], x)
b = b[9-n-1:]
b[0] = byte(n)
return b
}
func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash {
c := cshakeState{state: &state{rate: rate, outputLen: outputLen, dsbyte: dsbyte}}
// leftEncode returns max 9 bytes
c.initBlock = make([]byte, 0, 9*2+len(N)+len(S))
c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...)
c.initBlock = make([]byte, 0, 9+len(N)+9+len(S)) // leftEncode returns max 9 bytes
c.initBlock = append(c.initBlock, leftEncode(uint64(len(N))*8)...)
c.initBlock = append(c.initBlock, N...)
c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...)
c.initBlock = append(c.initBlock, leftEncode(uint64(len(S))*8)...)
c.initBlock = append(c.initBlock, S...)
c.Write(bytepad(c.initBlock, c.rate))
return &c
@@ -111,6 +106,30 @@ func (c *state) Clone() ShakeHash {
return c.clone()
}
func (c *cshakeState) MarshalBinary() ([]byte, error) {
return c.AppendBinary(make([]byte, 0, marshaledSize+len(c.initBlock)))
}
func (c *cshakeState) AppendBinary(b []byte) ([]byte, error) {
b, err := c.state.AppendBinary(b)
if err != nil {
return nil, err
}
b = append(b, c.initBlock...)
return b, nil
}
func (c *cshakeState) UnmarshalBinary(b []byte) error {
if len(b) <= marshaledSize {
return errors.New("sha3: invalid hash state")
}
if err := c.state.UnmarshalBinary(b[:marshaledSize]); err != nil {
return err
}
c.initBlock = bytes.Clone(b[marshaledSize:])
return nil
}
// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
// Its generic security strength is 128 bits against all attacks if at
// least 32 bytes of its output are used.
@@ -126,11 +145,11 @@ func NewShake256() ShakeHash {
}
func newShake128Generic() *state {
return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake}
return &state{rate: rateK256, outputLen: 32, dsbyte: dsbyteShake}
}
func newShake256Generic() *state {
return &state{rate: rate256, outputLen: 64, dsbyte: dsbyteShake}
return &state{rate: rateK512, outputLen: 64, dsbyte: dsbyteShake}
}
// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash,
@@ -143,7 +162,7 @@ func NewCShake128(N, S []byte) ShakeHash {
if len(N) == 0 && len(S) == 0 {
return NewShake128()
}
return newCShake(N, S, rate128, 32, dsbyteCShake)
return newCShake(N, S, rateK256, 32, dsbyteCShake)
}
// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash,
@@ -156,7 +175,7 @@ func NewCShake256(N, S []byte) ShakeHash {
if len(N) == 0 && len(S) == 0 {
return NewShake256()
}
return newCShake(N, S, rate256, 64, dsbyteCShake)
return newCShake(N, S, rateK512, 64, dsbyteCShake)
}
// ShakeSum128 writes an arbitrary-length digest of data into hash.
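
A self-contained sketch of the rewritten leftEncode (NIST SP 800-185, section 2.3.1): emit the byte length n of x, then x itself in n big-endian bytes.

package main

import (
	"fmt"
	"math/bits"
)

// leftEncodeSketch mirrors the logic above for illustration.
func leftEncodeSketch(x uint64) []byte {
	n := (bits.Len64(x) + 7) / 8
	if n == 0 {
		n = 1 // zero still encodes as one byte
	}
	out := make([]byte, n+1)
	out[0] = byte(n)
	for i := 0; i < n; i++ {
		out[1+i] = byte(x >> (8 * (n - 1 - i)))
	}
	return out
}

func main() {
	fmt.Println(leftEncodeSketch(0))     // [1 0]
	fmt.Println(leftEncodeSketch(168*8)) // [2 5 64], i.e. 1344 = 0x0540
}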


@@ -1,40 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
import (
"crypto/subtle"
"encoding/binary"
"unsafe"
"golang.org/x/sys/cpu"
)
// xorIn xors the bytes in buf into the state.
func xorIn(d *state, buf []byte) {
if cpu.IsBigEndian {
for i := 0; len(buf) >= 8; i++ {
a := binary.LittleEndian.Uint64(buf)
d.a[i] ^= a
buf = buf[8:]
}
} else {
ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a))
subtle.XORBytes(ab[:], ab[:], buf)
}
}
// copyOut copies uint64s to a byte buffer.
func copyOut(d *state, b []byte) {
if cpu.IsBigEndian {
for i := 0; len(b) >= 8; i++ {
binary.LittleEndian.PutUint64(b, d.a[i])
b = b[8:]
}
} else {
ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a))
copy(b, ab[:])
}
}

vendor/golang.org/x/exp/LICENSE generated vendored

@@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

vendor/golang.org/x/net/LICENSE generated vendored

@@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.


@@ -827,10 +827,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize())
cc.peerMaxHeaderTableSize = initialHeaderTableSize
if t.AllowHTTP {
cc.nextStreamID = 3
}
if cs, ok := c.(connectionStater); ok {
state := cs.ConnectionState()
cc.tlsState = &state

vendor/golang.org/x/oauth2/LICENSE generated vendored

@@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

vendor/golang.org/x/sync/LICENSE generated vendored

@@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

vendor/golang.org/x/sys/LICENSE generated vendored

@@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s generated vendored Normal file

@@ -0,0 +1,17 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin && amd64 && gc
#include "textflag.h"
TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sysctl(SB)
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
TEXT libc_sysctlbyname_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sysctlbyname(SB)
GLOBL ·libc_sysctlbyname_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctlbyname_trampoline_addr(SB)/8, $libc_sysctlbyname_trampoline<>(SB)

vendor/golang.org/x/sys/cpu/cpu.go generated vendored

@@ -105,6 +105,8 @@ var ARM64 struct {
HasSVE bool // Scalable Vector Extensions
HasSVE2 bool // Scalable Vector Extensions 2
HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32
HasDIT bool // Data Independent Timing support
HasI8MM bool // Advanced SIMD Int8 matrix multiplication instructions
_ CacheLinePad
}
@@ -199,6 +201,25 @@ var S390X struct {
_ CacheLinePad
}
// RISCV64 contains the supported CPU features and performance characteristics for riscv64
// platforms. The booleans in RISCV64, with the exception of HasFastMisaligned, indicate
// the presence of RISC-V extensions.
//
// It is safe to assume that all the RV64G extensions are supported and so they are omitted from
// this structure. As riscv64 Go programs require at least RV64G, the code that populates
// this structure cannot run successfully if some of the RV64G extensions are missing.
// The struct is padded to avoid false sharing.
var RISCV64 struct {
_ CacheLinePad
HasFastMisaligned bool // Fast misaligned accesses
HasC bool // Compressed instruction-set extension
HasV bool // Vector extension compatible with RVV 1.0
HasZba bool // Address generation instructions extension
HasZbb bool // Basic bit-manipulation extension
HasZbs bool // Single-bit instructions extension
_ CacheLinePad
}
func init() {
archInit()
initOptions()
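
Consuming the new flags is the usual pattern; on non-riscv64 builds every field simply reads false. A minimal sketch:

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	fmt.Println("RVV 1.0 vector ext:", cpu.RISCV64.HasV)
	fmt.Println("Zba/Zbb/Zbs:", cpu.RISCV64.HasZba, cpu.RISCV64.HasZbb, cpu.RISCV64.HasZbs)
	fmt.Println("fast misaligned access:", cpu.RISCV64.HasFastMisaligned)
}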


@@ -38,6 +38,8 @@ func initOptions() {
{Name: "dcpop", Feature: &ARM64.HasDCPOP},
{Name: "asimddp", Feature: &ARM64.HasASIMDDP},
{Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM},
{Name: "dit", Feature: &ARM64.HasDIT},
{Name: "i8mm", Feature: &ARM64.HasI8MM},
}
}
@@ -145,6 +147,11 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) {
ARM64.HasLRCPC = true
}
switch extractBits(isar1, 52, 55) {
case 1:
ARM64.HasI8MM = true
}
// ID_AA64PFR0_EL1
switch extractBits(pfr0, 16, 19) {
case 0:
@@ -168,6 +175,11 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) {
parseARM64SVERegister(getzfr0())
}
switch extractBits(pfr0, 48, 51) {
case 1:
ARM64.HasDIT = true
}
}
func parseARM64SVERegister(zfr0 uint64) {

vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go generated vendored Normal file

@@ -0,0 +1,61 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin && amd64 && gc
package cpu
// darwinSupportsAVX512 checks Darwin kernel for AVX512 support via sysctl
// call (see issue 43089). It also restricts AVX512 support for Darwin to
// kernel version 21.3.0 (MacOS 12.2.0) or later (see issue 49233).
//
// Background:
// Darwin implements a special mechanism to economize on thread state when
// AVX512 specific registers are not in use. This scheme minimizes state when
// preempting threads that haven't yet used any AVX512 instructions, but adds
// special requirements to check for AVX512 hardware support at runtime (e.g.
// via sysctl call or commpage inspection). See issue 43089 and link below for
// full background:
// https://github.com/apple-oss-distributions/xnu/blob/xnu-11215.1.10/osfmk/i386/fpu.c#L214-L240
//
// Additionally, all versions of the Darwin kernel from 19.6.0 through 21.2.0
// (corresponding to MacOS 10.15.6 - 12.1) have a bug that can cause corruption
// of the AVX512 mask registers (K0-K7) upon signal return. For this reason
// AVX512 is considered unsafe to use on Darwin for kernel versions prior to
// 21.3.0, where a fix has been confirmed. See issue 49233 for full background.
func darwinSupportsAVX512() bool {
return darwinSysctlEnabled([]byte("hw.optional.avx512f\x00")) && darwinKernelVersionCheck(21, 3, 0)
}
// Ensure Darwin kernel version is at least major.minor.patch, avoiding dependencies
func darwinKernelVersionCheck(major, minor, patch int) bool {
var release [256]byte
err := darwinOSRelease(&release)
if err != nil {
return false
}
var mmp [3]int
c := 0
Loop:
for _, b := range release[:] {
switch {
case b >= '0' && b <= '9':
mmp[c] = 10*mmp[c] + int(b-'0')
case b == '.':
c++
if c > 2 {
return false
}
case b == 0:
break Loop
default:
return false
}
}
if c != 2 {
return false
}
return mmp[0] > major || mmp[0] == major && (mmp[1] > minor || mmp[1] == minor && mmp[2] >= patch)
}


@@ -6,10 +6,10 @@
package cpu
// cpuid is implemented in cpu_x86.s for gc compiler
// cpuid is implemented in cpu_gc_x86.s for gc compiler
// and in cpu_gccgo.c for gccgo.
func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler
// xgetbv with ecx = 0 is implemented in cpu_gc_x86.s for gc compiler
// and in cpu_gccgo.c for gccgo.
func xgetbv() (eax, edx uint32)


@@ -18,7 +18,7 @@ TEXT ·cpuid(SB), NOSPLIT, $0-24
RET
// func xgetbv() (eax, edx uint32)
TEXT ·xgetbv(SB),NOSPLIT,$0-8
TEXT ·xgetbv(SB), NOSPLIT, $0-8
MOVL $0, CX
XGETBV
MOVL AX, eax+0(FP)


@@ -23,9 +23,3 @@ func xgetbv() (eax, edx uint32) {
gccgoXgetbv(&a, &d)
return a, d
}
// gccgo doesn't build on Darwin, per:
// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76
func darwinSupportsAVX512() bool {
return false
}


@@ -35,8 +35,10 @@ const (
hwcap_SHA512 = 1 << 21
hwcap_SVE = 1 << 22
hwcap_ASIMDFHM = 1 << 23
hwcap_DIT = 1 << 24
hwcap2_SVE2 = 1 << 1
hwcap2_I8MM = 1 << 13
)
// linuxKernelCanEmulateCPUID reports whether we're running
@@ -106,9 +108,11 @@ func doinit() {
ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512)
ARM64.HasSVE = isSet(hwCap, hwcap_SVE)
ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM)
ARM64.HasDIT = isSet(hwCap, hwcap_DIT)
// HWCAP2 feature bits
ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2)
ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM)
}
func isSet(hwc uint, value uint) bool {


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x
//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64
package cpu

vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go generated vendored Normal file

@@ -0,0 +1,137 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
import (
"syscall"
"unsafe"
)
// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe
// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available.
//
// A note on detection of the Vector extension using HWCAP.
//
// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5.
// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe
// syscall is not available then neither is the Vector extension (which needs kernel support).
// The riscv_hwprobe syscall should then be all we need to detect the Vector extension.
// However, some RISC-V board manufacturers ship boards with an older kernel on top of which
// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe
// patches. These kernels advertise support for the Vector extension using HWCAP. Falling
// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not
// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option.
//
// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by
// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board
// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified
// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use
// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector
// extension are binary incompatible. HWCAP can then not be used in isolation to populate the
// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0.
//
// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector
// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype
// register. This check would allow us to safely detect version 1.0 of the Vector extension
// with HWCAP, if riscv_hwprobe were not available. However, the check cannot
// be added until the assembler supports the Vector instructions.
//
// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the
// extensions it advertises support for are explicitly versioned. It's also worth noting that
// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba.
// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority
// of RISC-V extensions.
//
// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information.
// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must
// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall
// here.
const (
// Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go.
riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4
riscv_HWPROBE_IMA_C = 0x2
riscv_HWPROBE_IMA_V = 0x4
riscv_HWPROBE_EXT_ZBA = 0x8
riscv_HWPROBE_EXT_ZBB = 0x10
riscv_HWPROBE_EXT_ZBS = 0x20
riscv_HWPROBE_KEY_CPUPERF_0 = 0x5
riscv_HWPROBE_MISALIGNED_FAST = 0x3
riscv_HWPROBE_MISALIGNED_MASK = 0x7
)
const (
// sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go.
sys_RISCV_HWPROBE = 258
)
// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go.
type riscvHWProbePairs struct {
key int64
value uint64
}
const (
// CPU features
hwcap_RISCV_ISA_C = 1 << ('C' - 'A')
)
func doinit() {
// A slice of key/value pair structures is passed to the RISCVHWProbe syscall. The key
// field should be initialised with one of the key constants defined above, e.g.,
// RISCV_HWPROBE_KEY_IMA_EXT_0. The syscall will set the value field to the appropriate value.
// If the kernel does not recognise a key it will set the key field to -1 and the value field to 0.
pairs := []riscvHWProbePairs{
{riscv_HWPROBE_KEY_IMA_EXT_0, 0},
{riscv_HWPROBE_KEY_CPUPERF_0, 0},
}
// This call only indicates that extensions are supported if they are implemented on all cores.
if riscvHWProbe(pairs, 0) {
if pairs[0].key != -1 {
v := uint(pairs[0].value)
RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C)
RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V)
RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA)
RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB)
RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS)
}
if pairs[1].key != -1 {
v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK
RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST
}
}
// Let's double check with HWCAP if the C extension does not appear to be supported.
// This may happen if we're running on a kernel older than 6.4.
if !RISCV64.HasC {
RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C)
}
}
func isSet(hwc uint, value uint) bool {
return hwc&value != 0
}
// riscvHWProbe is a simplified version of the generated wrapper function found in
// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. We simplify it by removing the
// cpuCount and cpus parameters which we do not need. We always want to pass 0 for
// these parameters here so the kernel only reports the extensions that are present
// on all cores.
func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool {
var _zero uintptr
var p0 unsafe.Pointer
if len(pairs) > 0 {
p0 = unsafe.Pointer(&pairs[0])
} else {
p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0)
return e1 == 0
}

vendor/golang.org/x/sys/cpu/cpu_other_x86.go generated vendored Normal file

@@ -0,0 +1,11 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build 386 || amd64p32 || (amd64 && (!darwin || !gc))
package cpu
func darwinSupportsAVX512() bool {
panic("only implemented for gc && amd64 && darwin")
}


@@ -8,4 +8,13 @@ package cpu
const cacheLineSize = 64
func initOptions() {}
func initOptions() {
options = []option{
{Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned},
{Name: "c", Feature: &RISCV64.HasC},
{Name: "v", Feature: &RISCV64.HasV},
{Name: "zba", Feature: &RISCV64.HasZba},
{Name: "zbb", Feature: &RISCV64.HasZbb},
{Name: "zbs", Feature: &RISCV64.HasZbs},
}
}


@@ -92,10 +92,8 @@ func archInit() {
osSupportsAVX = isSet(1, eax) && isSet(2, eax)
if runtime.GOOS == "darwin" {
// Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers.
// Since users can't rely on mask register contents, let's not advertise AVX-512 support.
// See issue 49233.
osSupportsAVX512 = false
// Darwin requires special AVX512 checks, see cpu_darwin_x86.go
osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512()
} else {
// Check if OPMASK and ZMM registers have OS support.
osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)

vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go generated vendored Normal file

@@ -0,0 +1,98 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Minimal copy of x/sys/unix so the cpu package can make a
// system call on Darwin without depending on x/sys/unix.
//go:build darwin && amd64 && gc
package cpu
import (
"syscall"
"unsafe"
)
type _C_int int32
// adapted from unix.Uname() at x/sys/unix/syscall_darwin.go L419
func darwinOSRelease(release *[256]byte) error {
// from x/sys/unix/zerrors_openbsd_amd64.go
const (
CTL_KERN = 0x1
KERN_OSRELEASE = 0x2
)
mib := []_C_int{CTL_KERN, KERN_OSRELEASE}
n := unsafe.Sizeof(*release)
return sysctl(mib, &release[0], &n, nil, 0)
}
type Errno = syscall.Errno
var _zero uintptr // Single-word zero for use when we need a valid pointer to 0 bytes.
// from x/sys/unix/zsyscall_darwin_amd64.go L791-807
func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error {
var _p0 unsafe.Pointer
if len(mib) > 0 {
_p0 = unsafe.Pointer(&mib[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
if _, _, err := syscall_syscall6(
libc_sysctl_trampoline_addr,
uintptr(_p0),
uintptr(len(mib)),
uintptr(unsafe.Pointer(old)),
uintptr(unsafe.Pointer(oldlen)),
uintptr(unsafe.Pointer(new)),
uintptr(newlen),
); err != 0 {
return err
}
return nil
}
var libc_sysctl_trampoline_addr uintptr
// adapted from internal/cpu/cpu_arm64_darwin.go
func darwinSysctlEnabled(name []byte) bool {
out := int32(0)
nout := unsafe.Sizeof(out)
if ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); ret != nil {
return false
}
return out > 0
}
//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib"
var libc_sysctlbyname_trampoline_addr uintptr
// adapted from runtime/sys_darwin.go in the pattern of sysctl() above, as defined in x/sys/unix
func sysctlbyname(name *byte, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error {
if _, _, err := syscall_syscall6(
libc_sysctlbyname_trampoline_addr,
uintptr(unsafe.Pointer(name)),
uintptr(unsafe.Pointer(old)),
uintptr(unsafe.Pointer(oldlen)),
uintptr(unsafe.Pointer(new)),
uintptr(newlen),
0,
); err != 0 {
return err
}
return nil
}
//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib"
// Implemented in the runtime package (runtime/sys_darwin.go)
func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
//go:linkname syscall_syscall6 syscall.syscall6


@@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these
into a common file for each OS.
The merge is performed in the following steps:
1. Construct the set of common code that is idential in all architecture-specific files.
1. Construct the set of common code that is identical in all architecture-specific files.
2. Write this common code to the merged file.
3. Remove the common code from all architecture-specific files.


@@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) {
return &value, err
}
// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC
// association for the network device specified by ifname.
func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) {
ifr, err := NewIfreq(ifname)
if err != nil {
return nil, err
}
value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO}
ifrd := ifr.withData(unsafe.Pointer(&value))
err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd)
return &value, err
}
// IoctlGetHwTstamp retrieves the hardware timestamping configuration
// for the network device specified by ifname.
func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) {
ifr, err := NewIfreq(ifname)
if err != nil {
return nil, err
}
value := HwTstampConfig{}
ifrd := ifr.withData(unsafe.Pointer(&value))
err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd)
return &value, err
}
// IoctlSetHwTstamp updates the hardware timestamping configuration for
// the network device specified by ifname.
func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error {
ifr, err := NewIfreq(ifname)
if err != nil {
return err
}
ifrd := ifr.withData(unsafe.Pointer(cfg))
return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd)
}
// FdToClockID derives the clock ID from the file descriptor number
// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is
// suitable for system calls like ClockGettime.
func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) }
// IoctlPtpClockGetcaps returns the description of a given PTP device.
func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) {
var value PtpClockCaps
err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value))
return &value, err
}
// IoctlPtpSysOffsetPrecise returns a description of the clock
// offset compared to the system clock.
func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) {
var value PtpSysOffsetPrecise
err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value))
return &value, err
}
// IoctlPtpSysOffsetExtended returns an extended description of the
// clock offset compared to the system clock. The samples parameter
// specifies the desired number of measurements.
func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) {
value := PtpSysOffsetExtended{Samples: uint32(samples)}
err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value))
return &value, err
}
// IoctlPtpPinGetfunc returns the configuration of the specified
// I/O pin on given PTP device.
func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) {
value := PtpPinDesc{Index: uint32(index)}
err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value))
return &value, err
}
// IoctlPtpPinSetfunc updates configuration of the specified PTP
// I/O pin.
func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error {
return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd))
}
// IoctlPtpPeroutRequest configures the periodic output mode of the
// PTP I/O pins.
func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error {
return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r))
}
// IoctlPtpExttsRequest configures the external timestamping mode
// of the PTP I/O pins.
func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error {
return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r))
}
// IoctlGetWatchdogInfo fetches information about a watchdog device from the
// Linux watchdog API. For more information, see:
// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
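
A minimal sketch of the new PTP helpers against /dev/ptp0 (requires a PTP-capable NIC and privileges; struct field names per the vendored ztypes):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/dev/ptp0", unix.O_RDWR, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	caps, err := unix.IoctlPtpClockGetcaps(fd)
	if err != nil {
		panic(err)
	}
	fmt.Printf("max freq adj %d ppb, %d programmable pins\n", caps.Max_adj, caps.N_pins)

	// FD_TO_CLOCKID: read the hardware clock directly via clock_gettime.
	var ts unix.Timespec
	if err := unix.ClockGettime(unix.FdToClockID(fd), &ts); err != nil {
		panic(err)
	}
	fmt.Println("PHC time:", ts.Sec, ts.Nsec)
}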


@@ -58,6 +58,7 @@ includes_Darwin='
#define _DARWIN_USE_64_BIT_INODE
#define __APPLE_USE_RFC_3542
#include <stdint.h>
#include <sys/stdio.h>
#include <sys/attr.h>
#include <sys/clonefile.h>
#include <sys/kern_control.h>
@@ -157,6 +158,16 @@ includes_Linux='
#endif
#define _GNU_SOURCE
// See the description in unix/linux/types.go
#if defined(__ARM_EABI__) || \
(defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \
(defined(__powerpc__) && (!defined(__powerpc64__)))
# ifdef _TIME_BITS
# undef _TIME_BITS
# endif
# define _TIME_BITS 32
#endif
// <sys/ioctl.h> is broken on powerpc64, as it fails to include definitions of
// these structures. We just include them copied from <bits/termios.h>.
#if defined(__powerpc__)
@@ -255,6 +266,7 @@ struct ltchars {
#include <linux/nsfs.h>
#include <linux/perf_event.h>
#include <linux/pps.h>
#include <linux/ptp_clock.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/reboot.h>
@@ -526,6 +538,7 @@ ccflags="$@"
$2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ ||
$2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ ||
$2 ~ /^NFC_.*_(MAX)?SIZE$/ ||
$2 ~ /^PTP_/ ||
$2 ~ /^RAW_PAYLOAD_/ ||
$2 ~ /^[US]F_/ ||
$2 ~ /^TP_STATUS_/ ||
@@ -551,6 +564,7 @@ ccflags="$@"
$2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ &&
$2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ ||
$2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ ||
$2 ~ /^(CONNECT|SAE)_/ ||
$2 ~ /^FIORDCHK$/ ||
$2 ~ /^SIOC/ ||
$2 ~ /^TIOC/ ||
@@ -654,7 +668,7 @@ errors=$(
signals=$(
echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' |
grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' |
grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
sort
)
@@ -664,7 +678,7 @@ echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
sort >_error.grep
echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' |
grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' |
grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
sort >_signal.grep
echo '// mkerrors.sh' "$@"


@@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int,
var status _C_int
var r Pid_t
err = ERESTART
// AIX wait4 may return with ERESTART errno, while the processus is still
// AIX wait4 may return with ERESTART errno, while the process is still
// active.
for err == ERESTART {
r, err = wait4(Pid_t(pid), &status, options, rusage)


@@ -402,6 +402,18 @@ func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error {
return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq))
}
//sys renamexNp(from string, to string, flag uint32) (err error)
func RenamexNp(from string, to string, flag uint32) (err error) {
return renamexNp(from, to, flag)
}
//sys renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error)
func RenameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) {
return renameatxNp(fromfd, from, tofd, to, flag)
}
//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL
func Uname(uname *Utsname) error {
@@ -554,6 +566,43 @@ func PthreadFchdir(fd int) (err error) {
return pthread_fchdir_np(fd)
}
// Connectx calls connectx(2) to initiate a connection on a socket.
//
// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument.
//
// - srcIf is the optional source interface index. 0 means unspecified.
// - srcAddr is the optional source address. nil means unspecified.
// - dstAddr is the destination address.
//
// On success, Connectx returns the number of bytes enqueued for transmission.
func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) {
endpoints := SaEndpoints{
Srcif: srcIf,
}
if srcAddr != nil {
addrp, addrlen, err := srcAddr.sockaddr()
if err != nil {
return 0, err
}
endpoints.Srcaddr = (*RawSockaddr)(addrp)
endpoints.Srcaddrlen = uint32(addrlen)
}
if dstAddr != nil {
addrp, addrlen, err := dstAddr.sockaddr()
if err != nil {
return 0, err
}
endpoints.Dstaddr = (*RawSockaddr)(addrp)
endpoints.Dstaddrlen = uint32(addrlen)
}
err = connectx(fd, &endpoints, associd, flags, iov, &n, connid)
return
}
//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error)
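
A Darwin-only sketch of the wrapper: a plain connect with no source pinning and no early data. SAE_ASSOCID_ANY and the Sockaddr types are the vendored ones; error handling is trimmed.

//go:build darwin

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	dst := &unix.SockaddrInet4{Port: 80, Addr: [4]byte{93, 184, 216, 34}}
	n, err := unix.Connectx(fd, 0, nil, dst, unix.SAE_ASSOCID_ANY, 0, nil, nil)
	fmt.Println("bytes enqueued:", n, "err:", err)
}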


@@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
return sendfile(outfd, infd, offset, count)
}
func Dup3(oldfd, newfd, flags int) error {
if oldfd == newfd || flags&^O_CLOEXEC != 0 {
return EINVAL
}
how := F_DUP2FD
if flags&O_CLOEXEC != 0 {
how = F_DUP2FD_CLOEXEC
}
_, err := fcntl(oldfd, how, newfd)
return err
}
/*
* Exposed directly
*/


@@ -11,6 +11,7 @@ package unix
int ioctl(int, unsigned long int, uintptr_t);
*/
import "C"
import "unsafe"
func ioctl(fd int, req uint, arg uintptr) (err error) {
r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg))


@@ -1295,6 +1295,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) {
return &value, err
}
// GetsockoptTCPCCVegasInfo returns algorithm-specific congestion control information for a socket using the "vegas"
// algorithm.
//
// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
//
// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) {
var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
vallen := _Socklen(SizeofTCPCCInfo)
err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
out := (*TCPVegasInfo)(unsafe.Pointer(&value[0]))
return out, err
}
// GetsockoptTCPCCDCTCPInfo returns algorithm-specific congestion control information for a socket using the "dctcp"
// algorithm.
//
// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
//
// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) {
var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
vallen := _Socklen(SizeofTCPCCInfo)
err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0]))
return out, err
}
// GetsockoptTCPCCBBRInfo returns algorithm-specific congestion control information for a socket using the "bbr"
// algorithm.
//
// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
//
// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) {
var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
vallen := _Socklen(SizeofTCPCCInfo)
err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
out := (*TCPBBRInfo)(unsafe.Pointer(&value[0]))
return out, err
}
// GetsockoptString returns the string value of the socket option opt for the
// socket associated with fd at the given socket level.
func GetsockoptString(fd, level, opt int) (string, error) {
@@ -1818,6 +1860,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error)
//sys ClockGetres(clockid int32, res *Timespec) (err error)
//sys ClockGettime(clockid int32, time *Timespec) (err error)
//sys ClockSettime(clockid int32, time *Timespec) (err error)
//sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error)
//sys Close(fd int) (err error)
//sys CloseRange(first uint, last uint, flags uint) (err error)
@@ -1959,7 +2002,26 @@ func Getpgrp() (pid int) {
//sysnb Getpid() (pid int)
//sysnb Getppid() (ppid int)
//sys Getpriority(which int, who int) (prio int, err error)
//sys Getrandom(buf []byte, flags int) (n int, err error)
func Getrandom(buf []byte, flags int) (n int, err error) {
vdsoRet, supported := vgetrandom(buf, uint32(flags))
if supported {
if vdsoRet < 0 {
return 0, errnoErr(syscall.Errno(-vdsoRet))
}
return vdsoRet, nil
}
var p *byte
if len(buf) > 0 {
p = &buf[0]
}
r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags))
if e != 0 {
return 0, errnoErr(e)
}
return int(r), nil
}
//sysnb Getrusage(who int, rusage *Rusage) (err error)
//sysnb Getsid(pid int) (sid int, err error)
//sysnb Gettid() (tid int)
@@ -2592,3 +2654,4 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) {
}
//sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error)
//sys Mseal(b []byte, flags uint) (err error)
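
A sketch of pairing the new algorithm-specific getters with TCP_CONGESTION (fd must be a connected TCP socket for real data; BBR field names per the vendored types):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func printCCInfo(fd int) error {
	algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
	if err != nil {
		return err
	}
	if algo != "bbr" {
		fmt.Println("congestion control:", algo)
		return nil
	}
	info, err := unix.GetsockoptTCPCCBBRInfo(fd, unix.IPPROTO_TCP, unix.TCP_CC_INFO)
	if err != nil {
		return err
	}
	fmt.Printf("bbr bw: %d bps\n", uint64(info.Bw_hi)<<32|uint64(info.Bw_lo))
	return nil
}

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)
	_ = printCCInfo(fd) // prints the default algorithm on an unconnected socket
}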


@@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
}
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
}
const SYS_FSTATAT = SYS_NEWFSTATAT


@@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
}
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
}
const SYS_FSTATAT = SYS_NEWFSTATAT


@@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error
}
return riscvHWProbe(pairs, setSize, set, flags)
}
const SYS_FSTATAT = SYS_NEWFSTATAT

Some files were not shown because too many files have changed in this diff