Merge pull request #69408 from cblecker/bump-cert-dep
Bump github.com/google/certificate-transparency-go to v1.0.21
Commit: 0187ed03eb
Godeps/Godeps.json (generated, 35 changes)

@@ -1500,6 +1500,7 @@
 		},
 		{
 			"ImportPath": "github.com/docker/libnetwork/ipvs",
+			"Comment": "v0.8.0-dev.2-1265-ga9cd636e",
 			"Rev": "a9cd636e37898226332c439363e2ed0ea185ae92"
 		},
 		{
@@ -1541,7 +1542,7 @@
 		},
 		{
 			"ImportPath": "github.com/evanphx/json-patch",
-			"Comment": "v3.0.0-34-g36442db",
+			"Comment": "v4.0.0-3-g36442db",
 			"Rev": "36442dbdb585210f8d5a1b45e67aa323c197d5c4"
 		},
 		{
@@ -1994,43 +1995,43 @@
 		},
 		{
 			"ImportPath": "github.com/google/certificate-transparency-go",
-			"Comment": "v1.0.10",
-			"Rev": "1bec4527572c443752ad4f2830bef88be0533236"
+			"Comment": "v1.0.21",
+			"Rev": "3629d6846518309d22c16fee15d1007262a459d2"
 		},
 		{
 			"ImportPath": "github.com/google/certificate-transparency-go/asn1",
-			"Comment": "v1.0.10",
-			"Rev": "1bec4527572c443752ad4f2830bef88be0533236"
+			"Comment": "v1.0.21",
+			"Rev": "3629d6846518309d22c16fee15d1007262a459d2"
 		},
 		{
 			"ImportPath": "github.com/google/certificate-transparency-go/client",
-			"Comment": "v1.0.10",
-			"Rev": "1bec4527572c443752ad4f2830bef88be0533236"
+			"Comment": "v1.0.21",
+			"Rev": "3629d6846518309d22c16fee15d1007262a459d2"
 		},
 		{
 			"ImportPath": "github.com/google/certificate-transparency-go/client/configpb",
-			"Comment": "v1.0.10",
-			"Rev": "1bec4527572c443752ad4f2830bef88be0533236"
+			"Comment": "v1.0.21",
+			"Rev": "3629d6846518309d22c16fee15d1007262a459d2"
 		},
 		{
 			"ImportPath": "github.com/google/certificate-transparency-go/jsonclient",
-			"Comment": "v1.0.10",
-			"Rev": "1bec4527572c443752ad4f2830bef88be0533236"
+			"Comment": "v1.0.21",
+			"Rev": "3629d6846518309d22c16fee15d1007262a459d2"
 		},
 		{
 			"ImportPath": "github.com/google/certificate-transparency-go/tls",
-			"Comment": "v1.0.10",
-			"Rev": "1bec4527572c443752ad4f2830bef88be0533236"
+			"Comment": "v1.0.21",
+			"Rev": "3629d6846518309d22c16fee15d1007262a459d2"
 		},
 		{
 			"ImportPath": "github.com/google/certificate-transparency-go/x509",
-			"Comment": "v1.0.10",
-			"Rev": "1bec4527572c443752ad4f2830bef88be0533236"
+			"Comment": "v1.0.21",
+			"Rev": "3629d6846518309d22c16fee15d1007262a459d2"
 		},
 		{
			"ImportPath": "github.com/google/certificate-transparency-go/x509/pkix",
-			"Comment": "v1.0.10",
-			"Rev": "1bec4527572c443752ad4f2830bef88be0533236"
+			"Comment": "v1.0.21",
+			"Rev": "3629d6846518309d22c16fee15d1007262a459d2"
 		},
 		{
 			"ImportPath": "github.com/google/gofuzz",
vendor/github.com/google/certificate-transparency-go/.gitignore (generated, vendored, 5 changes)

@@ -16,9 +16,14 @@
 /data
 /dumpscts
 /etcdiscover
+/findlog
+/goshawk
+/gosmin
 /gossip_server
 /preloader
 /scanlog
+/sctcheck
+/sctscan
 /trillian_log_server
 /trillian_log_signer
 /trillian.json
vendor/github.com/google/certificate-transparency-go/.travis.yml (generated, vendored, 47 changes)

@@ -1,29 +1,42 @@
-sudo: false
+sudo: true # required for CI push into Kubernetes.
 language: go
 os: linux
-go: 1.9
+go: "1.10"
 
+go_import_path: github.com/google/certificate-transparency-go
+
 env:
-  - GOFLAGS=
+  - GCE_CI=${ENABLE_GCE_CI} GOFLAGS=
   - GOFLAGS=-race
-  - GOFLAGS= WITH_ETCD=true
+  - GOFLAGS= WITH_ETCD=true WITH_COVERAGE=true
   - GOFLAGS=-race WITH_ETCD=true
 
 matrix:
   fast_finish: true
 
+addons:
+  apt:
+    sources:
+      - mysql-5.7-trusty
+    packages:
+      - mysql-server
+      - mysql-client
+
+services:
+  - docker
+
+before_install:
+  - sudo mysql -e "use mysql; update user set authentication_string=PASSWORD('') where User='root'; update user set plugin='mysql_native_password';FLUSH PRIVILEGES;"
+  - sudo mysql_upgrade
+  - sudo service mysql restart
+
 install:
-  - |
-    if [ ! -d $HOME/gopath/src/github.com/google ]; then
-      mkdir -p $HOME/gopath/src/github.com/google
-      ln -s $TRAVIS_BUILD_DIR $HOME/gopath/src/github.com/google/certificate-transparency-go
-    fi
   - mkdir ../protoc
  - |
    (
      cd ../protoc
-      wget https://github.com/google/protobuf/releases/download/v3.2.0/protoc-3.2.0-${TRAVIS_OS_NAME}-x86_64.zip
-      unzip protoc-3.2.0-${TRAVIS_OS_NAME}-x86_64.zip
+      wget https://github.com/google/protobuf/releases/download/v3.5.1/protoc-3.5.1-${TRAVIS_OS_NAME}-x86_64.zip
+      unzip protoc-3.5.1-${TRAVIS_OS_NAME}-x86_64.zip
    )
  - export PATH=$(pwd)/../protoc/bin:$PATH
  - go get -d -t ./...
@@ -41,9 +54,8 @@ install:
 
 script:
   - set -e
-  - export TRILLIAN_SQL_DRIVER=mysql
   - cd $HOME/gopath/src/github.com/google/certificate-transparency-go
-  - ./scripts/presubmit.sh ${PRESUBMIT_OPTS}
+  - ./scripts/presubmit.sh ${PRESUBMIT_OPTS} ${WITH_COVERAGE:+--coverage}
  - |
    # Check re-generation didn't change anything
    status=$(git status --porcelain | grep -v coverage) || :
@@ -64,3 +76,12 @@ script:
 after_success:
   - cp /tmp/coverage.txt .
   - bash <(curl -s https://codecov.io/bash)
+  - |
+    # Push up to GCE CI instance if we're running after a merge to master
+    if [[ "${GCE_CI}" == "true" ]] && [[ $TRAVIS_PULL_REQUEST == "false" ]] && [[ $TRAVIS_BRANCH == "master" ]]; then
+      . scripts/install_cloud.sh
+      echo ${GCLOUD_SERVICE_KEY_CI} | base64 --decode -i > ${HOME}/gcloud-service-key.json
+      gcloud auth activate-service-account --key-file ${HOME}/gcloud-service-key.json
+      rm ${HOME}/gcloud-service-key.json
+      . scripts/deploy_gce_ci.sh
+    fi
vendor/github.com/google/certificate-transparency-go/CHANGELOG.md (generated, vendored, new file, 208 lines)

@@ -0,0 +1,208 @@
+# CERTIFICATE-TRANSPARENCY-GO Changelog
+
+## v1.0.20 - Minimal Gossip / Go 1.11 Fix / Utility Improvements
+
+Published 2018-07-05 09:21:34 +0000 UTC
+
+Enhancements have been made to various utilities including `scanner`, `sctcheck`, `loglist` and `x509util`.
+
+The `allow_verification_with_non_compliant_keys` flag has been removed from `signatures.go`.
+
+An implementation of Gossip has been added. See the `gossip/minimal` package for more information.
+
+An X.509 compatibility issue for Go 1.11 has been fixed. This should be backwards compatible with 1.10.
+
+Commit [37a384cd035e722ea46e55029093e26687138edf](https://api.github.com/repos/google/certificate-transparency-go/commits/37a384cd035e722ea46e55029093e26687138edf) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.20)
+
+## v1.0.19 - CTFE User Quota
+
+Published 2018-06-01 13:51:52 +0000 UTC
+
+CTFE now supports Trillian Log's explicit quota API; quota can be requested based on the remote user's IP, as well as per-issuing certificate in submitted chains.
+
+Commit [8736a411b4ff214ea20687e46c2b67d66ebd83fc](https://api.github.com/repos/google/certificate-transparency-go/commits/8736a411b4ff214ea20687e46c2b67d66ebd83fc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.19)
+
+## v1.0.18 - Adding Migration Tool / Client Additions / K8 Config
+
+Published 2018-06-01 14:28:20 +0000 UTC
+
+Work on a log migration tool (Migrillian) is in progress. This is not yet ready for production use but will provide features for mirroring and migrating logs.
+
+The `RequestLog` API allows for logging of SCTs when they are issued by CTFE.
+
+The CT Go client now supports `GetEntryAndProof`. Utilities have been switched over to use the `glog` package.
+
+Commit [77abf2dac5410a62c04ac1c662c6d0fa54afc2dc](https://api.github.com/repos/google/certificate-transparency-go/commits/77abf2dac5410a62c04ac1c662c6d0fa54afc2dc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.18)
+
+## v1.0.17 - Merkle verification / Tracing / Demo script / CORS
+
+Published 2018-06-01 14:25:16 +0000 UTC
+
+Now uses Merkle Tree verification from Trillian.
+
+The CT server now supports CORS.
+
+Request tracing added using OpenCensus. For GCE / K8 it just requires the flag to be enabled to export traces to Stackdriver. Other environments may differ.
+
+A demo script was added that goes through setting up a simple deployment suitable for development / demo purposes. This may be useful for those new to the project.
+
+Commit [3c3d22ce946447d047a03228ebb4a41e3e4eb15b](https://api.github.com/repos/google/certificate-transparency-go/commits/3c3d22ce946447d047a03228ebb4a41e3e4eb15b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.17)
+
+## v1.0.16 - Lifecycle test / Go 1.10.1
+
+Published 2018-06-01 14:22:23 +0000 UTC
+
+An integration test was added that goes through a create / drain queue / freeze lifecycle for a log.
+
+Changes to `x509` were merged from Go 1.10.1.
+
+Commit [a72423d09b410b80673fd1135ba1022d04bac6cd](https://api.github.com/repos/google/certificate-transparency-go/commits/a72423d09b410b80673fd1135ba1022d04bac6cd) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.16)
+
+## v1.0.15 - More control of verification, grpclb, stackdriver metrics
+
+Published 2018-06-01 14:20:32 +0000 UTC
+
+Facilities were added to the `x509` package to control whether verification checks are applied.
+
+Log server requests are now balanced using `gRPClb`.
+
+For Kubernetes, metrics can be published to Stackdriver monitoring.
+
+Commit [684d6eee6092774e54d301ccad0ed61bc8d010c1](https://api.github.com/repos/google/certificate-transparency-go/commits/684d6eee6092774e54d301ccad0ed61bc8d010c1) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.15)
+
+## v1.0.14 - SQLite Removed, LeafHashForLeaf
+
+Published 2018-06-01 14:15:37 +0000 UTC
+
+Support for SQLlite was removed. This motivation was ongoing test flakiness caused by multi-user access. This database may work for an embedded scenario but is not suitable for use in a server environment.
+
+A `LeafHashForLeaf` client API was added and is now used by the CT client and integration tests.
+
+Commit [698cd6a661196db4b2e71437422178ffe8705006](https://api.github.com/repos/google/certificate-transparency-go/commits/698cd6a661196db4b2e71437422178ffe8705006) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.14)
+
+## v1.0.13 - Crypto changes, util updates, sync with trillian repo, loglist verification
+
+Published 2018-06-01 14:15:21 +0000 UTC
+
+Some of our custom crypto package that were wrapping calls to the standard package have been removed and the base features used directly.
+
+Updates were made to GCE ingress and health checks.
+
+The log list utility can verify signatures.
+
+Commit [480c3654a70c5383b9543ec784203030aedbd3a5](https://api.github.com/repos/google/certificate-transparency-go/commits/480c3654a70c5383b9543ec784203030aedbd3a5) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.13)
+
+## v1.0.12 - Client / util updates & CTFE fixes
+
+Published 2018-06-01 14:13:42 +0000 UTC
+
+The CT client can now use a JSON loglist to find logs.
+
+CTFE had a fix applied for preissued precerts.
+
+A DNS client was added and CT client was extended to support DNS retrieval.
+
+Commit [74c06c95e0b304a050a1c33764c8a01d653a16e3](https://api.github.com/repos/google/certificate-transparency-go/commits/74c06c95e0b304a050a1c33764c8a01d653a16e3) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.12)
+
+## v1.0.11 - Kubernetes CI / Integration fixes
+
+Published 2018-06-01 14:12:18 +0000 UTC
+
+Updates to Kubernetes configs, mostly related to running a CI instance.
+
+Commit [0856acca7e0ab7f082ae83a1fbb5d21160962efc](https://api.github.com/repos/google/certificate-transparency-go/commits/0856acca7e0ab7f082ae83a1fbb5d21160962efc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.11)
+
+## v1.0.10 - More scanner, x509, utility and client fixes. CTFE updates
+
+Published 2018-06-01 14:09:47 +0000 UTC
+
+The CT client was using the wrong protobuffer library package. To guard against this in future a check has been added to our lint config.
+
+The `x509` and `asn1` packages have had upstream fixes applied from Go 1.10rc1.
+
+Commit [1bec4527572c443752ad4f2830bef88be0533236](https://api.github.com/repos/google/certificate-transparency-go/commits/1bec4527572c443752ad4f2830bef88be0533236) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.10)
+
+## v1.0.9 - Scanner, x509, utility and client fixes
+
+Published 2018-06-01 14:11:13 +0000 UTC
+
+The `scanner` utility now displays throughput stats.
+
+Build instructions and README files were updated.
+
+The `certcheck` utility can be told to ignore unknown critical X.509 extensions.
+
+Commit [c06833528d04a94eed0c775104d1107bab9ae17c](https://api.github.com/repos/google/certificate-transparency-go/commits/c06833528d04a94eed0c775104d1107bab9ae17c) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.9)
+
+## v1.0.8 - Client fixes, align with trillian repo
+
+Published 2018-06-01 14:06:44 +0000 UTC
+
+Commit [e8b02c60f294b503dbb67de0868143f5d4935e56](https://api.github.com/repos/google/certificate-transparency-go/commits/e8b02c60f294b503dbb67de0868143f5d4935e56) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.8)
+
+## v1.0.7 - CTFE fixes
+
+Published 2018-06-01 14:06:13 +0000 UTC
+
+An issue was fixed with CTFE signature caching. In an unlikely set of circumstances this could lead to log mis-operation. While the chances of this are small, we recommend that versions prior to this one are not deployed.
+
+Commit [52c0590bd3b4b80c5497005b0f47e10557425eeb](https://api.github.com/repos/google/certificate-transparency-go/commits/52c0590bd3b4b80c5497005b0f47e10557425eeb) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.7)
+
+## v1.0.6 - crlcheck improvements / other fixes
+
+Published 2018-06-01 14:04:22 +0000 UTC
+
+The `crlcheck` utility has had several fixes and enhancements. Additionally the `hammer` now supports temporal logs.
+
+Commit [3955e4a00c42e83ff17ce25003976159c5d0f0f9](https://api.github.com/repos/google/certificate-transparency-go/commits/3955e4a00c42e83ff17ce25003976159c5d0f0f9) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.6)
+
+## v1.0.5 - X509 and asn1 fixes
+
+Published 2018-06-01 14:02:58 +0000 UTC
+
+This release is mostly fixes to the `x509` and `asn1` packages. Some command line utilties were also updated.
+
+Commit [ae40d07cce12f1227c6e658e61c9dddb7646f97b](https://api.github.com/repos/google/certificate-transparency-go/commits/ae40d07cce12f1227c6e658e61c9dddb7646f97b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.5)
+
+## v1.0.4 - Multi log backend configs
+
+Published 2018-06-01 14:02:07 +0000 UTC
+
+Support was added to allow CTFE to use multiple backends, each serving a distinct set of logs. It allows for e.g. regional backend deployment with common frontend servers.
+
+Commit [62023ed90b41fa40854957b5dec7d9d73594723f](https://api.github.com/repos/google/certificate-transparency-go/commits/62023ed90b41fa40854957b5dec7d9d73594723f) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.4)
+
+## v1.0.3 - Hammer updates, use standard context
+
+Published 2018-06-01 14:01:11 +0000 UTC
+
+After the Go 1.9 migration references to anything other than the standard `context` package have been removed. This is the only one that should be used from now on.
+
+Commit [b28beed8b9aceacc705e0ff4a11d435a310e3d97](https://api.github.com/repos/google/certificate-transparency-go/commits/b28beed8b9aceacc705e0ff4a11d435a310e3d97) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.3)
+
+## v1.0.2 - Go 1.9
+
+Published 2018-06-01 14:00:00 +0000 UTC
+
+Go 1.9 is now required to build the code.
+
+Commit [3aed33d672ee43f04b1e8a00b25ca3e2e2e74309](https://api.github.com/repos/google/certificate-transparency-go/commits/3aed33d672ee43f04b1e8a00b25ca3e2e2e74309) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.2)
+
+## v1.0.1 - Hammer and client improvements
+
+Published 2018-06-01 13:59:29 +0000 UTC
+
+Commit [c28796cc21776667fb05d6300e32d9517be96515](https://api.github.com/repos/google/certificate-transparency-go/commits/c28796cc21776667fb05d6300e32d9517be96515) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.1)
+
+## v1.0 - First Trillian CT Release
+
+Published 2018-06-01 13:59:00 +0000 UTC
+
+This is the point that corresponds to the 1.0 release in the trillian repo.
+
+Commit [abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d](https://api.github.com/repos/google/certificate-transparency-go/commits/abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0)
vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go (generated, vendored, 88 changes)

@@ -1,22 +1,12 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // source: multilog.proto
 
-/*
-Package configpb is a generated protocol buffer package.
-
-It is generated from these files:
-	multilog.proto
-
-It has these top-level messages:
-	TemporalLogConfig
-	LogShardConfig
-*/
 package configpb
 
 import proto "github.com/golang/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import google_protobuf "github.com/golang/protobuf/ptypes/timestamp"
+import timestamp "github.com/golang/protobuf/ptypes/timestamp"
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -32,13 +22,35 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
 // TemporalLogConfig is a set of LogShardConfig messages, whose
 // time limits should be contiguous.
 type TemporalLogConfig struct {
-	Shard []*LogShardConfig `protobuf:"bytes,1,rep,name=shard" json:"shard,omitempty"`
+	Shard                []*LogShardConfig `protobuf:"bytes,1,rep,name=shard,proto3" json:"shard,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
 func (m *TemporalLogConfig) Reset()         { *m = TemporalLogConfig{} }
 func (m *TemporalLogConfig) String() string { return proto.CompactTextString(m) }
 func (*TemporalLogConfig) ProtoMessage()    {}
-func (*TemporalLogConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*TemporalLogConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_multilog_3c9b797b88da6f07, []int{0}
+}
+func (m *TemporalLogConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TemporalLogConfig.Unmarshal(m, b)
+}
+func (m *TemporalLogConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TemporalLogConfig.Marshal(b, m, deterministic)
+}
+func (dst *TemporalLogConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TemporalLogConfig.Merge(dst, src)
+}
+func (m *TemporalLogConfig) XXX_Size() int {
+	return xxx_messageInfo_TemporalLogConfig.Size(m)
+}
+func (m *TemporalLogConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_TemporalLogConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TemporalLogConfig proto.InternalMessageInfo
 
 func (m *TemporalLogConfig) GetShard() []*LogShardConfig {
 	if m != nil {
@@ -50,23 +62,45 @@ func (m *TemporalLogConfig) GetShard() []*LogShardConfig {
 // LogShardConfig describes the acceptable date range for a single shard of a temporal
 // log.
 type LogShardConfig struct {
-	Uri string `protobuf:"bytes,1,opt,name=uri" json:"uri,omitempty"`
+	Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"`
 	// The log's public key in DER-encoded PKIX form.
 	PublicKeyDer []byte `protobuf:"bytes,2,opt,name=public_key_der,json=publicKeyDer,proto3" json:"public_key_der,omitempty"`
 	// not_after_start defines the start of the range of acceptable NotAfter
 	// values, inclusive.
 	// Leaving this unset implies no lower bound to the range.
-	NotAfterStart *google_protobuf.Timestamp `protobuf:"bytes,3,opt,name=not_after_start,json=notAfterStart" json:"not_after_start,omitempty"`
+	NotAfterStart *timestamp.Timestamp `protobuf:"bytes,3,opt,name=not_after_start,json=notAfterStart,proto3" json:"not_after_start,omitempty"`
 	// not_after_limit defines the end of the range of acceptable NotAfter values,
 	// exclusive.
 	// Leaving this unset implies no upper bound to the range.
-	NotAfterLimit *google_protobuf.Timestamp `protobuf:"bytes,4,opt,name=not_after_limit,json=notAfterLimit" json:"not_after_limit,omitempty"`
+	NotAfterLimit        *timestamp.Timestamp `protobuf:"bytes,4,opt,name=not_after_limit,json=notAfterLimit,proto3" json:"not_after_limit,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
 }
 
 func (m *LogShardConfig) Reset()         { *m = LogShardConfig{} }
 func (m *LogShardConfig) String() string { return proto.CompactTextString(m) }
 func (*LogShardConfig) ProtoMessage()    {}
-func (*LogShardConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (*LogShardConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_multilog_3c9b797b88da6f07, []int{1}
+}
+func (m *LogShardConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LogShardConfig.Unmarshal(m, b)
+}
+func (m *LogShardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LogShardConfig.Marshal(b, m, deterministic)
+}
+func (dst *LogShardConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LogShardConfig.Merge(dst, src)
+}
+func (m *LogShardConfig) XXX_Size() int {
+	return xxx_messageInfo_LogShardConfig.Size(m)
+}
+func (m *LogShardConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_LogShardConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogShardConfig proto.InternalMessageInfo
 
 func (m *LogShardConfig) GetUri() string {
 	if m != nil {
@@ -82,14 +116,14 @@ func (m *LogShardConfig) GetPublicKeyDer() []byte {
 	return nil
 }
 
-func (m *LogShardConfig) GetNotAfterStart() *google_protobuf.Timestamp {
+func (m *LogShardConfig) GetNotAfterStart() *timestamp.Timestamp {
 	if m != nil {
 		return m.NotAfterStart
 	}
 	return nil
 }
 
-func (m *LogShardConfig) GetNotAfterLimit() *google_protobuf.Timestamp {
+func (m *LogShardConfig) GetNotAfterLimit() *timestamp.Timestamp {
 	if m != nil {
 		return m.NotAfterLimit
 	}
@@ -101,9 +135,9 @@ func init() {
 	proto.RegisterType((*LogShardConfig)(nil), "configpb.LogShardConfig")
 }
 
-func init() { proto.RegisterFile("multilog.proto", fileDescriptor0) }
+func init() { proto.RegisterFile("multilog.proto", fileDescriptor_multilog_3c9b797b88da6f07) }
 
-var fileDescriptor0 = []byte{
+var fileDescriptor_multilog_3c9b797b88da6f07 = []byte{
 	// 241 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x8f, 0xb1, 0x4e, 0xc3, 0x30,
 	0x14, 0x45, 0x65, 0x02, 0x08, 0xdc, 0x12, 0xc0, 0x93, 0xd5, 0x85, 0xa8, 0x62, 0xc8, 0xe4, 0x4a,
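The regenerated `configpb` types above describe temporal (sharded) CT logs: a `TemporalLogConfig` is a list of `LogShardConfig` entries whose `not_after_start`/`not_after_limit` bounds are meant to be contiguous. A minimal sketch of building such a config, assuming the vendored `configpb` package and `github.com/golang/protobuf/ptypes` are available; the shard URIs are placeholders:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	"github.com/google/certificate-transparency-go/client/configpb"
)

func main() {
	// Shard boundary: certificates whose NotAfter falls before 2019 belong to
	// the first shard, later ones to the second (placeholder URIs).
	cutoff, err := ptypes.TimestampProto(time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC))
	if err != nil {
		panic(err)
	}

	cfg := &configpb.TemporalLogConfig{
		Shard: []*configpb.LogShardConfig{
			{Uri: "https://example.com/log-pre2019", NotAfterLimit: cutoff},
			{Uri: "https://example.com/log-2019on", NotAfterStart: cutoff},
		},
	}
	for _, s := range cfg.GetShard() {
		fmt.Println(s.GetUri())
	}
}
```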
vendor/github.com/google/certificate-transparency-go/client/getentries.go (generated, vendored, 2 changes)

@@ -66,7 +66,7 @@ func (c *LogClient) GetEntries(ctx context.Context, start, end int64) ([]ct.LogE
 	for i, entry := range resp.Entries {
 		index := start + int64(i)
 		logEntry, err := ct.LogEntryFromLeaf(index, &entry)
-		if _, ok := err.(x509.NonFatalErrors); !ok && err != nil {
+		if x509.IsFatal(err) {
 			return nil, err
 		}
 		entries[i] = *logEntry
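The switch from the `err.(x509.NonFatalErrors)` type assertion to `x509.IsFatal(err)` reflects the new error-handling idiom in the forked x509 package: parse errors can be non-fatal, in which case a usable certificate is still returned. A sketch of a caller applying the same pattern; the `parseLoosely` helper and `ctutil` package name are illustrative, not part of the library:

```go
package ctutil

import (
	"log"

	"github.com/google/certificate-transparency-go/x509"
)

// parseLoosely mirrors the x509.IsFatal(err) pattern above: only structurally
// fatal errors abort, while non-fatal parse issues are logged and the
// certificate is kept.
func parseLoosely(der []byte) (*x509.Certificate, error) {
	cert, err := x509.ParseCertificate(der)
	if x509.IsFatal(err) {
		return nil, err
	}
	if err != nil {
		log.Printf("non-fatal parse issue: %v", err)
	}
	return cert, nil
}
```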
vendor/github.com/google/certificate-transparency-go/client/logclient.go (generated, vendored, 64 changes)

@@ -19,7 +19,6 @@ package client
 
 import (
 	"context"
-	"crypto/sha256"
 	"encoding/base64"
 	"fmt"
 	"net/http"
@@ -35,11 +34,19 @@ type LogClient struct {
 	jsonclient.JSONClient
 }
 
+// CheckLogClient is an interface that allows (just) checking of various log contents.
+type CheckLogClient interface {
+	BaseURI() string
+	GetSTH(context.Context) (*ct.SignedTreeHead, error)
+	GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error)
+	GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error)
+}
+
 // New constructs a new LogClient instance.
 // |uri| is the base URI of the CT log instance to interact with, e.g.
-// http://ct.googleapis.com/pilot
+// https://ct.googleapis.com/pilot
 // |hc| is the underlying client to be used for HTTP requests to the CT log.
-// |opts| can be used to provide a customer logger interface and a public key
+// |opts| can be used to provide a custom logger interface and a public key
 // for signature verification.
 func New(uri string, hc *http.Client, opts jsonclient.Options) (*LogClient, error) {
 	logClient, err := jsonclient.New(uri, hc, opts)
@@ -169,35 +176,16 @@ func (c *LogClient) GetSTH(ctx context.Context) (*ct.SignedTreeHead, error) {
 		}
 		return nil, err
 	}
-	sth := ct.SignedTreeHead{
-		TreeSize:  resp.TreeSize,
-		Timestamp: resp.Timestamp,
-	}
-
-	if len(resp.SHA256RootHash) != sha256.Size {
-		return nil, RspError{
-			Err:        fmt.Errorf("sha256_root_hash is invalid length, expected %d got %d", sha256.Size, len(resp.SHA256RootHash)),
-			StatusCode: httpRsp.StatusCode,
-			Body:       body,
-		}
-	}
-	copy(sth.SHA256RootHash[:], resp.SHA256RootHash)
-
-	var ds ct.DigitallySigned
-	if rest, err := tls.Unmarshal(resp.TreeHeadSignature, &ds); err != nil {
-		return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
-	} else if len(rest) > 0 {
-		return nil, RspError{
-			Err:        fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)),
-			StatusCode: httpRsp.StatusCode,
-			Body:       body,
-		}
-	}
-	sth.TreeHeadSignature = ds
-	if err := c.VerifySTHSignature(sth); err != nil {
+
+	sth, err := resp.ToSignedTreeHead()
+	if err != nil {
 		return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
 	}
-	return &sth, nil
+
+	if err := c.VerifySTHSignature(*sth); err != nil {
+		return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
+	}
+	return sth, nil
 }
 
 // VerifySTHSignature checks the signature in sth, returning any error encountered or nil if verification is
@@ -281,3 +269,21 @@ func (c *LogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error)
 	}
 	return roots, nil
 }
+
+// GetEntryAndProof returns a log entry and audit path for the index of a leaf.
+func (c *LogClient) GetEntryAndProof(ctx context.Context, index, treeSize uint64) (*ct.GetEntryAndProofResponse, error) {
+	base10 := 10
+	params := map[string]string{
+		"leaf_index": strconv.FormatUint(index, base10),
+		"tree_size":  strconv.FormatUint(treeSize, base10),
+	}
+	var resp ct.GetEntryAndProofResponse
+	httpRsp, body, err := c.GetAndParse(ctx, ct.GetEntryAndProofPath, params, &resp)
+	if err != nil {
+		if httpRsp != nil {
+			return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
+		}
+		return nil, err
+	}
+	return &resp, nil
+}
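For orientation, a minimal usage sketch of the updated client: `GetSTH` now builds and verifies the tree head via `resp.ToSignedTreeHead()`, and `GetEntryAndProof` is new relative to the previously vendored v1.0.10. The log URL below is just an example endpoint, and no public key is supplied in the options, so the `Verifier` stays nil and STH signatures are not checked:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/google/certificate-transparency-go/client"
	"github.com/google/certificate-transparency-go/jsonclient"
)

func main() {
	ctx := context.Background()

	// Example log endpoint; without a public key the client skips signature
	// verification of the returned STH.
	lc, err := client.New("https://ct.googleapis.com/pilot", &http.Client{}, jsonclient.Options{})
	if err != nil {
		panic(err)
	}

	sth, err := lc.GetSTH(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Printf("log %s: tree size %d\n", lc.BaseURI(), sth.TreeSize)

	// Fetch a single leaf together with its audit path.
	rsp, err := lc.GetEntryAndProof(ctx, 0, sth.TreeSize)
	if err != nil {
		panic(err)
	}
	fmt.Printf("entry 0 comes with %d audit-path hashes\n", len(rsp.AuditPath))
}
```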
vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml (generated, vendored, new file, 10 lines)

@@ -0,0 +1,10 @@
+steps:
+- id: build_ctfe
+  name: gcr.io/cloud-builders/docker
+  args:
+  - build
+  - --file=trillian/examples/deployment/docker/ctfe/Dockerfile
+  - --tag=gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME}
+  - .
+images:
+- gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME}
vendor/github.com/google/certificate-transparency-go/gometalinter.json (generated, vendored, 1 change)

@@ -1,4 +1,5 @@
 {
+  "Deadline": "60s",
  "Linters": {
    "license": "./scripts/check_license.sh:PATH:LINE:MESSAGE",
    "forked": "./scripts/check_forked.sh:PATH:LINE:MESSAGE",
vendor/github.com/google/certificate-transparency-go/jsonclient/client.go (generated, vendored, 7 changes)

@@ -53,7 +53,7 @@ type backoffer interface {
 // JSONClient provides common functionality for interacting with a JSON server
 // that uses cryptographic signatures.
 type JSONClient struct {
-	uri        string                // the base URI of the server. e.g. http://ct.googleapis/pilot
+	uri        string                // the base URI of the server. e.g. https://ct.googleapis/pilot
 	httpClient *http.Client          // used to interact with the server via HTTP
 	Verifier   *ct.SignatureVerifier // nil for no verification (e.g. no public key available)
 	logger     Logger                // interface to use for logging warnings and errors
@@ -139,6 +139,11 @@ func New(uri string, hc *http.Client, opts Options) (*JSONClient, error) {
 	}, nil
 }
 
+// BaseURI returns the base URI that the JSONClient makes queries to.
+func (c *JSONClient) BaseURI() string {
+	return c.uri
+}
+
 // GetAndParse makes a HTTP GET call to the given path, and attempta to parse
 // the response as a JSON representation of the rsp structure. Returns the
 // http.Response, the body of the response, and an error. Note that the
vendor/github.com/google/certificate-transparency-go/serialization.go (generated, vendored, 156 changes)

@@ -20,6 +20,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"strings"
+	"time"
 
 	"github.com/google/certificate-transparency-go/tls"
 	"github.com/google/certificate-transparency-go/x509"
@@ -127,7 +128,7 @@ func MerkleTreeLeafFromRawChain(rawChain []ASN1Cert, etype LogEntryType, timesta
 	chain := make([]*x509.Certificate, count)
 	for i := range chain {
 		cert, err := x509.ParseCertificate(rawChain[i].Data)
-		if err != nil {
+		if x509.IsFatal(err) {
 			return nil, fmt.Errorf("failed to parse chain[%d] cert: %v", i, err)
 		}
 		chain[i] = cert
@@ -189,6 +190,53 @@ func MerkleTreeLeafFromChain(chain []*x509.Certificate, etype LogEntryType, time
 	return &leaf, nil
 }
 
+// MerkleTreeLeafForEmbeddedSCT generates a MerkleTreeLeaf from a chain and an
+// SCT timestamp, where the leaf certificate at chain[0] is a certificate that
+// contains embedded SCTs. It is assumed that the timestamp provided is from
+// one of the SCTs embedded within the leaf certificate.
+func MerkleTreeLeafForEmbeddedSCT(chain []*x509.Certificate, timestamp uint64) (*MerkleTreeLeaf, error) {
+	// For building the leaf for a certificate and SCT where the SCT is embedded
+	// in the certificate, we need to build the original precertificate TBS
+	// data. First, parse the leaf cert and its issuer.
+	if len(chain) < 2 {
+		return nil, fmt.Errorf("no issuer cert available for precert leaf building")
+	}
+	issuer := chain[1]
+	cert := chain[0]
+
+	// Next, post-process the DER-encoded TBSCertificate, to remove the SCTList
+	// extension.
+	tbs, err := x509.RemoveSCTList(cert.RawTBSCertificate)
+	if err != nil {
+		return nil, fmt.Errorf("failed to remove SCT List extension: %v", err)
+	}
+
+	return &MerkleTreeLeaf{
+		Version:  V1,
+		LeafType: TimestampedEntryLeafType,
+		TimestampedEntry: &TimestampedEntry{
+			EntryType: PrecertLogEntryType,
+			Timestamp: timestamp,
+			PrecertEntry: &PreCert{
+				IssuerKeyHash:  sha256.Sum256(issuer.RawSubjectPublicKeyInfo),
+				TBSCertificate: tbs,
+			},
+		},
+	}, nil
+}
+
+// LeafHashForLeaf returns the leaf hash for a Merkle tree leaf.
+func LeafHashForLeaf(leaf *MerkleTreeLeaf) ([sha256.Size]byte, error) {
+	leafData, err := tls.Marshal(*leaf)
+	if err != nil {
+		return [sha256.Size]byte{}, fmt.Errorf("failed to tls-encode MerkleTreeLeaf: %s", err)
+	}
+
+	data := append([]byte{TreeLeafPrefix}, leafData...)
+	leafHash := sha256.Sum256(data)
+	return leafHash, nil
+}
+
 // IsPreIssuer indicates whether a certificate is a pre-cert issuer with the specific
 // certificate transparency extended key usage.
 func IsPreIssuer(issuer *x509.Certificate) bool {
@@ -200,56 +248,100 @@ func IsPreIssuer(issuer *x509.Certificate) bool {
 	return false
 }
 
-// LogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data after JSON parsing)
-// into a LogEntry object (which includes x509.Certificate objects, after TLS and ASN.1 parsing).
-// Note that this function may return a valid LogEntry object and a non-nil error value, when
-// the error indicates a non-fatal parsing error (of type x509.NonFatalErrors).
-func LogEntryFromLeaf(index int64, leafEntry *LeafEntry) (*LogEntry, error) {
-	var leaf MerkleTreeLeaf
-	if rest, err := tls.Unmarshal(leafEntry.LeafInput, &leaf); err != nil {
-		return nil, fmt.Errorf("failed to unmarshal MerkleTreeLeaf for index %d: %v", index, err)
+// RawLogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data
+// after JSON parsing) into a RawLogEntry object (i.e. a TLS-parsed structure).
+func RawLogEntryFromLeaf(index int64, entry *LeafEntry) (*RawLogEntry, error) {
+	ret := RawLogEntry{Index: index}
+	if rest, err := tls.Unmarshal(entry.LeafInput, &ret.Leaf); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal MerkleTreeLeaf: %v", err)
 	} else if len(rest) > 0 {
-		return nil, fmt.Errorf("trailing data (%d bytes) after MerkleTreeLeaf for index %d", len(rest), index)
+		return nil, fmt.Errorf("MerkleTreeLeaf: trailing data %d bytes", len(rest))
 	}
 
-	var err error
-	entry := LogEntry{Index: index, Leaf: leaf}
-	switch leaf.TimestampedEntry.EntryType {
+	switch eType := ret.Leaf.TimestampedEntry.EntryType; eType {
 	case X509LogEntryType:
 		var certChain CertificateChain
-		if rest, err := tls.Unmarshal(leafEntry.ExtraData, &certChain); err != nil {
-			return nil, fmt.Errorf("failed to unmarshal ExtraData for index %d: %v", index, err)
+		if rest, err := tls.Unmarshal(entry.ExtraData, &certChain); err != nil {
+			return nil, fmt.Errorf("failed to unmarshal CertificateChain: %v", err)
 		} else if len(rest) > 0 {
-			return nil, fmt.Errorf("trailing data (%d bytes) after CertificateChain for index %d", len(rest), index)
+			return nil, fmt.Errorf("CertificateChain: trailing data %d bytes", len(rest))
 		}
-		entry.Chain = certChain.Entries
-		entry.X509Cert, err = leaf.X509Certificate()
-		if _, ok := err.(x509.NonFatalErrors); !ok && err != nil {
-			return nil, fmt.Errorf("failed to parse certificate in MerkleTreeLeaf for index %d: %v", index, err)
-		}
+		ret.Cert = *ret.Leaf.TimestampedEntry.X509Entry
+		ret.Chain = certChain.Entries
+
 	case PrecertLogEntryType:
 		var precertChain PrecertChainEntry
-		if rest, err := tls.Unmarshal(leafEntry.ExtraData, &precertChain); err != nil {
-			return nil, fmt.Errorf("failed to unmarshal PrecertChainEntry for index %d: %v", index, err)
+		if rest, err := tls.Unmarshal(entry.ExtraData, &precertChain); err != nil {
+			return nil, fmt.Errorf("failed to unmarshal PrecertChainEntry: %v", err)
 		} else if len(rest) > 0 {
-			return nil, fmt.Errorf("trailing data (%d bytes) after PrecertChainEntry for index %d", len(rest), index)
+			return nil, fmt.Errorf("PrecertChainEntry: trailing data %d bytes", len(rest))
 		}
-		entry.Chain = precertChain.CertificateChain
+		ret.Cert = precertChain.PreCertificate
+		ret.Chain = precertChain.CertificateChain
+
+	default:
+		// TODO(pavelkalinnikov): Section 4.6 of RFC6962 implies that unknown types
+		// are not errors. We should revisit how we process this case.
+		return nil, fmt.Errorf("unknown entry type: %v", eType)
+	}
+
+	return &ret, nil
+}
+
+// ToLogEntry converts RawLogEntry to a LogEntry, which includes an x509-parsed
+// (pre-)certificate.
+//
+// Note that this function may return a valid LogEntry object and a non-nil
+// error value, when the error indicates a non-fatal parsing error.
+func (rle *RawLogEntry) ToLogEntry() (*LogEntry, error) {
+	var err error
+	entry := LogEntry{Index: rle.Index, Leaf: rle.Leaf, Chain: rle.Chain}
+
+	switch eType := rle.Leaf.TimestampedEntry.EntryType; eType {
+	case X509LogEntryType:
+		entry.X509Cert, err = rle.Leaf.X509Certificate()
+		if x509.IsFatal(err) {
+			return nil, fmt.Errorf("failed to parse certificate: %v", err)
+		}
+
+	case PrecertLogEntryType:
 		var tbsCert *x509.Certificate
-		tbsCert, err = leaf.Precertificate()
-		if _, ok := err.(x509.NonFatalErrors); !ok && err != nil {
-			return nil, fmt.Errorf("failed to parse precertificate in MerkleTreeLeaf for index %d: %v", index, err)
+		tbsCert, err = rle.Leaf.Precertificate()
+		if x509.IsFatal(err) {
+			return nil, fmt.Errorf("failed to parse precertificate: %v", err)
 		}
 		entry.Precert = &Precertificate{
-			Submitted:      precertChain.PreCertificate,
-			IssuerKeyHash:  leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
+			Submitted:      rle.Cert,
+			IssuerKeyHash:  rle.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
 			TBSCertificate: tbsCert,
 		}
+
 	default:
-		return nil, fmt.Errorf("saw unknown entry type at index %d: %v", index, leaf.TimestampedEntry.EntryType)
+		return nil, fmt.Errorf("unknown entry type: %v", eType)
 	}
-	// err may hold a x509.NonFatalErrors object.
+
+	// err may be non-nil for a non-fatal error.
 	return &entry, err
 }
+
+// LogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data
+// after JSON parsing) into a LogEntry object (which includes x509.Certificate
+// objects, after TLS and ASN.1 parsing).
+//
+// Note that this function may return a valid LogEntry object and a non-nil
+// error value, when the error indicates a non-fatal parsing error.
+func LogEntryFromLeaf(index int64, leaf *LeafEntry) (*LogEntry, error) {
+	rle, err := RawLogEntryFromLeaf(index, leaf)
+	if err != nil {
+		return nil, err
+	}
+	return rle.ToLogEntry()
+}
+
+// TimestampToTime converts a timestamp in the style of RFC 6962 (milliseconds
+// since UNIX epoch) to a Go Time.
+func TimestampToTime(ts uint64) time.Time {
+	secs := int64(ts / 1000)
+	msecs := int64(ts % 1000)
+	return time.Unix(secs, msecs*1000000)
+}
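A short sketch of how the new serialization helpers fit together: convert a `LeafEntry` returned by get-entries into a `RawLogEntry`, hash it with `LeafHashForLeaf`, and convert its RFC 6962 millisecond timestamp with `TimestampToTime`. The `describeEntry` helper and `ctutil` package name are illustrative only:

```go
package ctutil

import (
	"fmt"

	ct "github.com/google/certificate-transparency-go"
)

// describeEntry TLS-parses a raw leaf, computes the RFC 6962 leaf hash, and
// converts the leaf's millisecond timestamp to a time.Time.
func describeEntry(index int64, leafEntry *ct.LeafEntry) error {
	rle, err := ct.RawLogEntryFromLeaf(index, leafEntry)
	if err != nil {
		return err
	}

	hash, err := ct.LeafHashForLeaf(&rle.Leaf)
	if err != nil {
		return err
	}

	when := ct.TimestampToTime(rle.Leaf.TimestampedEntry.Timestamp)
	fmt.Printf("entry %d: leaf hash %x, issued at %v\n", index, hash, when)
	return nil
}
```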
vendor/github.com/google/certificate-transparency-go/signatures.go (generated, vendored, 21 changes)

@@ -20,8 +20,8 @@ import (
 	"crypto/elliptic"
 	"crypto/rsa"
 	"crypto/sha256"
+	"encoding/base64"
 	"encoding/pem"
-	"flag"
 	"fmt"
 	"log"
 
@@ -29,8 +29,10 @@ import (
 	"github.com/google/certificate-transparency-go/x509"
 )
 
-var allowVerificationWithNonCompliantKeys = flag.Bool("allow_verification_with_non_compliant_keys", false,
-	"Allow a SignatureVerifier to use keys which are technically non-compliant with RFC6962.")
+// AllowVerificationWithNonCompliantKeys may be set to true in order to allow
+// SignatureVerifier to use keys which are technically non-compliant with
+// RFC6962.
+var AllowVerificationWithNonCompliantKeys = false
 
 // PublicKeyFromPEM parses a PEM formatted block and returns the public key contained within and any remaining unread bytes, or an error.
 func PublicKeyFromPEM(b []byte) (crypto.PublicKey, SHA256Hash, []byte, error) {
@@ -42,6 +44,15 @@ func PublicKeyFromPEM(b []byte) (crypto.PublicKey, SHA256Hash, []byte, error) {
 	return k, sha256.Sum256(p.Bytes), rest, err
 }
 
+// PublicKeyFromB64 parses a base64-encoded public key.
+func PublicKeyFromB64(b64PubKey string) (crypto.PublicKey, error) {
+	der, err := base64.StdEncoding.DecodeString(b64PubKey)
+	if err != nil {
+		return nil, fmt.Errorf("error decoding public key: %s", err)
+	}
+	return x509.ParsePKIXPublicKey(der)
+}
+
 // SignatureVerifier can verify signatures on SCTs and STHs
 type SignatureVerifier struct {
 	pubKey crypto.PublicKey
@@ -53,7 +64,7 @@ func NewSignatureVerifier(pk crypto.PublicKey) (*SignatureVerifier, error) {
 	case *rsa.PublicKey:
 		if pkType.N.BitLen() < 2048 {
 			e := fmt.Errorf("public key is RSA with < 2048 bits (size:%d)", pkType.N.BitLen())
-			if !(*allowVerificationWithNonCompliantKeys) {
+			if !AllowVerificationWithNonCompliantKeys {
 				return nil, e
 			}
 			log.Printf("WARNING: %v", e)
@@ -62,7 +73,7 @@ func NewSignatureVerifier(pk crypto.PublicKey) (*SignatureVerifier, error) {
 		params := *(pkType.Params())
 		if params != *elliptic.P256().Params() {
 			e := fmt.Errorf("public is ECDSA, but not on the P256 curve")
-			if !(*allowVerificationWithNonCompliantKeys) {
+			if !AllowVerificationWithNonCompliantKeys {
 				return nil, e
 			}
 			log.Printf("WARNING: %v", e)
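Because `allow_verification_with_non_compliant_keys` is no longer a command-line flag, callers that relied on it now set the exported variable directly, and `PublicKeyFromB64` lets them load a log's key straight from the base64 form used in log lists. A sketch under those assumptions; the key string is a truncated placeholder and will not decode:

```go
package main

import (
	"fmt"

	ct "github.com/google/certificate-transparency-go"
)

func main() {
	// Truncated placeholder for a log's base64 (DER/PKIX) public key.
	const pubKeyB64 = "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE..."

	// Previously the -allow_verification_with_non_compliant_keys flag; now a
	// plain exported variable that callers set before building a verifier.
	ct.AllowVerificationWithNonCompliantKeys = true

	pk, err := ct.PublicKeyFromB64(pubKeyB64)
	if err != nil {
		panic(err) // the placeholder key above will end up here
	}

	v, err := ct.NewSignatureVerifier(pk)
	if err != nil {
		panic(err)
	}
	fmt.Printf("verifier ready: %+v\n", v)
}
```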
vendor/github.com/google/certificate-transparency-go/tls/types.go (generated, vendored, 23 changes)

@@ -14,7 +14,13 @@
 
 package tls
 
-import "fmt"
+import (
+	"crypto"
+	"crypto/dsa"
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"fmt"
+)
 
 // DigitallySigned gives information about a signature, including the algorithm used
 // and the signature value. Defined in RFC 5246 s4.7.
@@ -94,3 +100,18 @@ func (s SignatureAlgorithm) String() string {
 		return fmt.Sprintf("UNKNOWN(%d)", s)
 	}
 }
+
+// SignatureAlgorithmFromPubKey returns the algorithm used for this public key.
+// ECDSA, RSA, and DSA keys are supported. Other key types will return Anonymous.
+func SignatureAlgorithmFromPubKey(k crypto.PublicKey) SignatureAlgorithm {
+	switch k.(type) {
+	case *ecdsa.PublicKey:
+		return ECDSA
+	case *rsa.PublicKey:
+		return RSA
+	case *dsa.PublicKey:
+		return DSA
+	default:
+		return Anonymous
+	}
+}
68
vendor/github.com/google/certificate-transparency-go/types.go
generated
vendored
68
vendor/github.com/google/certificate-transparency-go/types.go
generated
vendored
@ -54,6 +54,12 @@ func (e LogEntryType) String() string {
 	}
 }
 
+// RFC6962 section 2.1 requires a prefix byte on hash inputs for second preimage resistance.
+const (
+	TreeLeafPrefix = byte(0x00)
+	TreeNodePrefix = byte(0x01)
+)
+
 // MerkleLeafType represents the MerkleLeafType enum from section 3.4:
 //   enum { timestamped_entry(0), (255) } MerkleLeafType;
 type MerkleLeafType tls.Enum // tls:"maxval:255"
@ -193,6 +199,25 @@ func (d *DigitallySigned) UnmarshalJSON(b []byte) error {
 	return d.FromBase64String(content)
 }
 
+// RawLogEntry represents the (TLS-parsed) contents of an entry in a CT log.
+type RawLogEntry struct {
+	// Index is a position of the entry in the log.
+	Index int64
+	// Leaf is a parsed Merkle leaf hash input.
+	Leaf MerkleTreeLeaf
+	// Cert is:
+	// - A certificate if Leaf.TimestampedEntry.EntryType is X509LogEntryType.
+	// - A precertificate if Leaf.TimestampedEntry.EntryType is
+	//   PrecertLogEntryType, in the form of a DER-encoded Certificate as
+	//   originally added (which includes the poison extension and a signature
+	//   generated over the pre-cert by the pre-cert issuer).
+	// - Empty otherwise.
+	Cert ASN1Cert
+	// Chain is the issuing certificate chain starting with the issuer of Cert,
+	// or an empty slice if Cert is empty.
+	Chain []ASN1Cert
+}
+
 // LogEntry represents the (parsed) contents of an entry in a CT log. This is described
 // in section 3.1, but note that this structure does *not* match the TLS structure
 // defined there (the TLS structure is never used directly in RFC6962).
@ -368,7 +393,27 @@ func (m *MerkleTreeLeaf) Precertificate() (*x509.Certificate, error) {
 	return x509.ParseTBSCertificate(m.TimestampedEntry.PrecertEntry.TBSCertificate)
 }
 
+// APIEndpoint is a string that represents one of the Certificate Transparency
+// Log API endpoints.
+type APIEndpoint string
+
+// Certificate Transparency Log API endpoints; see section 4.
+// WARNING: Should match the URI paths without the "/ct/v1/" prefix.  If
+// changing these constants, may need to change those too.
+const (
+	AddChainStr          APIEndpoint = "add-chain"
+	AddPreChainStr       APIEndpoint = "add-pre-chain"
+	GetSTHStr            APIEndpoint = "get-sth"
+	GetEntriesStr        APIEndpoint = "get-entries"
+	GetProofByHashStr    APIEndpoint = "get-proof-by-hash"
+	GetSTHConsistencyStr APIEndpoint = "get-sth-consistency"
+	GetRootsStr          APIEndpoint = "get-roots"
+	GetEntryAndProofStr  APIEndpoint = "get-entry-and-proof"
+)
+
 // URI paths for Log requests; see section 4.
+// WARNING: Should match the API endpoints, with the "/ct/v1/" prefix.  If
+// changing these constants, may need to change those too.
 const (
 	AddChainPath    = "/ct/v1/add-chain"
 	AddPreChainPath = "/ct/v1/add-pre-chain"
@ -415,6 +460,29 @@ type GetSTHResponse struct {
 	TreeHeadSignature []byte `json:"tree_head_signature"` // Log signature for this STH
 }
 
+// ToSignedTreeHead creates a SignedTreeHead from the GetSTHResponse.
+func (r *GetSTHResponse) ToSignedTreeHead() (*SignedTreeHead, error) {
+	sth := SignedTreeHead{
+		TreeSize:  r.TreeSize,
+		Timestamp: r.Timestamp,
+	}
+
+	if len(r.SHA256RootHash) != sha256.Size {
+		return nil, fmt.Errorf("sha256_root_hash is invalid length, expected %d got %d", sha256.Size, len(r.SHA256RootHash))
+	}
+	copy(sth.SHA256RootHash[:], r.SHA256RootHash)
+
+	var ds DigitallySigned
+	if rest, err := tls.Unmarshal(r.TreeHeadSignature, &ds); err != nil {
+		return nil, fmt.Errorf("tls.Unmarshal(): %s", err)
+	} else if len(rest) > 0 {
+		return nil, fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest))
+	}
+	sth.TreeHeadSignature = ds
+
+	return &sth, nil
+}
+
 // GetSTHConsistencyResponse represents the JSON response to the get-sth-consistency
 // GET method from section 4.4.  (The corresponding GET request has parameters 'first' and
 // 'second'.)
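A brief sketch of how the new ToSignedTreeHead helper might be applied to a raw get-sth response body; the parseSTH wrapper name and the ct import alias are illustrative assumptions, not part of the vendored code.

package sthutil

import (
	"encoding/json"
	"fmt"

	ct "github.com/google/certificate-transparency-go"
)

// parseSTH converts a raw get-sth JSON body into a SignedTreeHead using the
// ToSignedTreeHead helper added in this version of the library.
func parseSTH(body []byte) (*ct.SignedTreeHead, error) {
	var resp ct.GetSTHResponse
	if err := json.Unmarshal(body, &resp); err != nil {
		return nil, fmt.Errorf("decoding get-sth response: %v", err)
	}
	// ToSignedTreeHead validates the root hash length and TLS-parses the
	// tree_head_signature into a DigitallySigned structure.
	return resp.ToSignedTreeHead()
}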
4  vendor/github.com/google/certificate-transparency-go/x509/BUILD  generated  vendored
@ -4,6 +4,7 @@ go_library(
     name = "go_default_library",
     srcs = [
         "cert_pool.go",
+        "curves.go",
         "error.go",
         "errors.go",
         "names.go",
@ -12,6 +13,8 @@ go_library(
         "pem_decrypt.go",
         "pkcs1.go",
         "pkcs8.go",
+        "ptr_sysptr_windows.go",
+        "ptr_uint_windows.go",
         "revoked.go",
         "root.go",
         "root_bsd.go",
@ -25,6 +28,7 @@ go_library(
         "root_solaris.go",
         "root_unix.go",
         "root_windows.go",
+        "rpki.go",
         "sec1.go",
         "verify.go",
         "x509.go",
2  vendor/github.com/google/certificate-transparency-go/x509/cert_pool.go  generated  vendored
@ -121,7 +121,7 @@ func (s *CertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) {
 		}
 
 		cert, err := ParseCertificate(block.Bytes)
-		if err != nil {
+		if IsFatal(err) {
 			continue
 		}
 
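A short sketch of the lenient-parsing pattern this change enables, assuming the forked x509 package; parseLeniently is a hypothetical helper name, not part of the vendored code.

package certutil

import (
	"log"

	"github.com/google/certificate-transparency-go/x509"
)

// parseLeniently mirrors the pattern used in AppendCertsFromPEM above: keep a
// certificate whose parse produced only non-fatal errors, and drop it only
// when IsFatal reports a real failure.
func parseLeniently(der []byte) *x509.Certificate {
	cert, err := x509.ParseCertificate(der)
	if x509.IsFatal(err) {
		return nil
	}
	if err != nil {
		log.Printf("non-fatal parse issues: %v", err)
	}
	return cert
}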
37  vendor/github.com/google/certificate-transparency-go/x509/curves.go  generated  vendored  Normal file
@ -0,0 +1,37 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package x509

import (
	"crypto/elliptic"
	"math/big"
	"sync"
)

// This file holds ECC curves that are not supported by the main Go crypto/elliptic
// library, but which have been observed in certificates in the wild.

var initonce sync.Once
var p192r1 *elliptic.CurveParams

func initAllCurves() {
	initSECP192R1()
}

func initSECP192R1() {
	// See SEC-2, section 2.2.2
	p192r1 = &elliptic.CurveParams{Name: "P-192"}
	p192r1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF", 16)
	p192r1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22831", 16)
	p192r1.B, _ = new(big.Int).SetString("64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1", 16)
	p192r1.Gx, _ = new(big.Int).SetString("188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012", 16)
	p192r1.Gy, _ = new(big.Int).SetString("07192B95FFC8DA78631011ED6B24CDD573F977A11E794811", 16)
	p192r1.BitSize = 192
}

func secp192r1() elliptic.Curve {
	initonce.Do(initAllCurves)
	return p192r1
}
20  vendor/github.com/google/certificate-transparency-go/x509/ptr_sysptr_windows.go  generated  vendored  Normal file
@ -0,0 +1,20 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.11

package x509

import (
	"syscall"
	"unsafe"
)

// For Go versions >= 1.11, the ExtraPolicyPara field in
// syscall.CertChainPolicyPara is of type syscall.Pointer. See:
// https://github.com/golang/go/commit/4869ec00e87ef

func convertToPolicyParaType(p unsafe.Pointer) syscall.Pointer {
	return (syscall.Pointer)(p)
}
17  vendor/github.com/google/certificate-transparency-go/x509/ptr_uint_windows.go  generated  vendored  Normal file
@ -0,0 +1,17 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.11

package x509

import "unsafe"

// For Go versions before 1.11, the ExtraPolicyPara field in
// syscall.CertChainPolicyPara was of type uintptr. See:
// https://github.com/golang/go/commit/4869ec00e87ef

func convertToPolicyParaType(p unsafe.Pointer) uintptr {
	return uintptr(p)
}
9  vendor/github.com/google/certificate-transparency-go/x509/revoked.go  generated  vendored
@ -14,12 +14,15 @@ import (
 	"github.com/google/certificate-transparency-go/x509/pkix"
 )
 
+// OID values for CRL extensions (TBSCertList.Extensions), RFC 5280 s5.2.
 var (
-	// OID values for CRL extensions (TBSCertList.Extensions), RFC 5280 s5.2.
 	OIDExtensionCRLNumber                = asn1.ObjectIdentifier{2, 5, 29, 20}
 	OIDExtensionDeltaCRLIndicator        = asn1.ObjectIdentifier{2, 5, 29, 27}
 	OIDExtensionIssuingDistributionPoint = asn1.ObjectIdentifier{2, 5, 29, 28}
-	// OID values for CRL entry extensions (RevokedCertificate.Extensions), RFC 5280 s5.3
+)
+
+// OID values for CRL entry extensions (RevokedCertificate.Extensions), RFC 5280 s5.3
+var (
 	OIDExtensionCRLReasons        = asn1.ObjectIdentifier{2, 5, 29, 21}
 	OIDExtensionInvalidityDate    = asn1.ObjectIdentifier{2, 5, 29, 24}
 	OIDExtensionCertificateIssuer = asn1.ObjectIdentifier{2, 5, 29, 29}
@ -238,7 +241,7 @@ func ParseCertificateListDER(derBytes []byte) (*CertificateList, error) {
 			}
 		case e.Id.Equal(OIDExtensionAuthorityInfoAccess):
 			// RFC 5280 s5.2.7
-			var aia []authorityInfoAccess
+			var aia []accessDescription
 			if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil {
 				errs.AddID(ErrInvalidCertListAuthInfoAccess, err)
 			} else if len(rest) != 0 {
2  vendor/github.com/google/certificate-transparency-go/x509/root_windows.go  generated  vendored
@ -109,7 +109,7 @@ func checkChainSSLServerPolicy(c *Certificate, chainCtx *syscall.CertChainContex
 	sslPara.Size = uint32(unsafe.Sizeof(*sslPara))
 
 	para := &syscall.CertChainPolicyPara{
-		ExtraPolicyPara: uintptr(unsafe.Pointer(sslPara)),
+		ExtraPolicyPara: convertToPolicyParaType(unsafe.Pointer(sslPara)),
 	}
 	para.Size = uint32(unsafe.Sizeof(*para))
 
242  vendor/github.com/google/certificate-transparency-go/x509/rpki.go  generated  vendored  Normal file
@ -0,0 +1,242 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package x509

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"

	"github.com/google/certificate-transparency-go/asn1"
)

// IPAddressPrefix describes an IP address prefix as an ASN.1 bit string,
// where the BitLength field holds the prefix length.
type IPAddressPrefix asn1.BitString

// IPAddressRange describes an (inclusive) IP address range.
type IPAddressRange struct {
	Min IPAddressPrefix
	Max IPAddressPrefix
}

// Most relevant values for AFI from:
// http://www.iana.org/assignments/address-family-numbers.
const (
	IPv4AddressFamilyIndicator = uint16(1)
	IPv6AddressFamilyIndicator = uint16(2)
)

// IPAddressFamilyBlocks describes a set of ranges of IP addresses.
type IPAddressFamilyBlocks struct {
	// AFI holds an address family indicator from
	// http://www.iana.org/assignments/address-family-numbers.
	AFI uint16
	// SAFI holds a subsequent address family indicator from
	// http://www.iana.org/assignments/safi-namespace.
	SAFI byte
	// InheritFromIssuer indicates that the set of addresses should
	// be taken from the issuer's certificate.
	InheritFromIssuer bool
	// AddressPrefixes holds prefixes if InheritFromIssuer is false.
	AddressPrefixes []IPAddressPrefix
	// AddressRanges holds ranges if InheritFromIssuer is false.
	AddressRanges []IPAddressRange
}

// Internal types for asn1 unmarshalling.
type ipAddressFamily struct {
	AddressFamily []byte // 2-byte AFI plus optional 1 byte SAFI
	Choice        asn1.RawValue
}

// Internally, use raw asn1.BitString rather than the IPAddressPrefix
// type alias (so that asn1.Unmarshal() decodes properly).
type ipAddressRange struct {
	Min asn1.BitString
	Max asn1.BitString
}

func parseRPKIAddrBlocks(data []byte, nfe *NonFatalErrors) []*IPAddressFamilyBlocks {
	// RFC 3779 2.2.3
	//   IPAddrBlocks        ::= SEQUENCE OF IPAddressFamily
	//
	//   IPAddressFamily     ::= SEQUENCE {    -- AFI & optional SAFI --
	//      addressFamily        OCTET STRING (SIZE (2..3)),
	//      ipAddressChoice      IPAddressChoice }
	//
	//   IPAddressChoice     ::= CHOICE {
	//      inherit              NULL, -- inherit from issuer --
	//      addressesOrRanges    SEQUENCE OF IPAddressOrRange }
	//
	//   IPAddressOrRange    ::= CHOICE {
	//      addressPrefix        IPAddress,
	//      addressRange         IPAddressRange }
	//
	//   IPAddressRange      ::= SEQUENCE {
	//      min                  IPAddress,
	//      max                  IPAddress }
	//
	//   IPAddress           ::= BIT STRING

	var addrBlocks []ipAddressFamily
	if rest, err := asn1.Unmarshal(data, &addrBlocks); err != nil {
		nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks extension: %v", err))
		return nil
	} else if len(rest) != 0 {
		nfe.AddError(errors.New("trailing data after ipAddrBlocks extension"))
		return nil
	}

	var results []*IPAddressFamilyBlocks
	for i, block := range addrBlocks {
		var fam IPAddressFamilyBlocks
		if l := len(block.AddressFamily); l < 2 || l > 3 {
			nfe.AddError(fmt.Errorf("invalid address family length (%d) for ipAddrBlock.addressFamily", l))
			continue
		}
		fam.AFI = binary.BigEndian.Uint16(block.AddressFamily[0:2])
		if len(block.AddressFamily) > 2 {
			fam.SAFI = block.AddressFamily[2]
		}
		// IPAddressChoice is an ASN.1 CHOICE where the chosen alternative is indicated by (implicit)
		// tagging of the alternatives -- here, either NULL or SEQUENCE OF.
		if bytes.Equal(block.Choice.FullBytes, asn1.NullBytes) {
			fam.InheritFromIssuer = true
			results = append(results, &fam)
			continue
		}

		var addrRanges []asn1.RawValue
		if _, err := asn1.Unmarshal(block.Choice.FullBytes, &addrRanges); err != nil {
			nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges: %v", i, err))
			continue
		}
		for j, ar := range addrRanges {
			// Each IPAddressOrRange is a CHOICE where the alternatives have distinct (implicit)
			// tags -- here, either BIT STRING or SEQUENCE.
			switch ar.Tag {
			case asn1.TagBitString:
				// BIT STRING for single prefix IPAddress
				var val asn1.BitString
				if _, err := asn1.Unmarshal(ar.FullBytes, &val); err != nil {
					nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d].addressPrefix: %v", i, j, err))
					continue
				}
				fam.AddressPrefixes = append(fam.AddressPrefixes, IPAddressPrefix(val))

			case asn1.TagSequence:
				var val ipAddressRange
				if _, err := asn1.Unmarshal(ar.FullBytes, &val); err != nil {
					nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d].addressRange: %v", i, j, err))
					continue
				}
				fam.AddressRanges = append(fam.AddressRanges, IPAddressRange{Min: IPAddressPrefix(val.Min), Max: IPAddressPrefix(val.Max)})

			default:
				nfe.AddError(fmt.Errorf("unexpected ASN.1 type in ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d]: %+v", i, j, ar))
			}
		}
		results = append(results, &fam)
	}
	return results
}

// ASIDRange describes an inclusive range of AS Identifiers (AS numbers or routing
// domain identifiers).
type ASIDRange struct {
	Min int
	Max int
}

// ASIdentifiers describes a collection of AS Identifiers (AS numbers or routing
// domain identifiers).
type ASIdentifiers struct {
	// InheritFromIssuer indicates that the set of AS identifiers should
	// be taken from the issuer's certificate.
	InheritFromIssuer bool
	// ASIDs holds AS identifiers if InheritFromIssuer is false.
	ASIDs []int
	// ASIDs holds AS identifier ranges (inclusive) if InheritFromIssuer is false.
	ASIDRanges []ASIDRange
}

type asIdentifiers struct {
	ASNum asn1.RawValue `asn1:"optional,tag:0"`
	RDI   asn1.RawValue `asn1:"optional,tag:1"`
}

func parseASIDChoice(val asn1.RawValue, nfe *NonFatalErrors) *ASIdentifiers {
	// RFC 3779 2.3.2
	//   ASIdentifierChoice  ::= CHOICE {
	//      inherit              NULL, -- inherit from issuer --
	//      asIdsOrRanges        SEQUENCE OF ASIdOrRange }
	//   ASIdOrRange         ::= CHOICE {
	//       id                  ASId,
	//       range               ASRange }
	//   ASRange             ::= SEQUENCE {
	//       min                 ASId,
	//       max                 ASId }
	//   ASId                ::= INTEGER
	if len(val.FullBytes) == 0 { // OPTIONAL
		return nil
	}
	// ASIdentifierChoice is an ASN.1 CHOICE where the chosen alternative is indicated by (implicit)
	// tagging of the alternatives -- here, either NULL or SEQUENCE OF.
	if bytes.Equal(val.Bytes, asn1.NullBytes) {
		return &ASIdentifiers{InheritFromIssuer: true}
	}
	var ids []asn1.RawValue
	if rest, err := asn1.Unmarshal(val.Bytes, &ids); err != nil {
		nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges: %v", err))
		return nil
	} else if len(rest) != 0 {
		nfe.AddError(errors.New("trailing data after ASIdentifiers.asIdsOrRanges"))
		return nil
	}
	var asID ASIdentifiers
	for i, id := range ids {
		// Each ASIdOrRange is a CHOICE where the alternatives have distinct (implicit)
		// tags -- here, either INTEGER or SEQUENCE.
		switch id.Tag {
		case asn1.TagInteger:
			var val int
			if _, err := asn1.Unmarshal(id.FullBytes, &val); err != nil {
				nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges[%d].id: %v", i, err))
				continue
			}
			asID.ASIDs = append(asID.ASIDs, val)

		case asn1.TagSequence:
			var val ASIDRange
			if _, err := asn1.Unmarshal(id.FullBytes, &val); err != nil {
				nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges[%d].range: %v", i, err))
				continue
			}
			asID.ASIDRanges = append(asID.ASIDRanges, val)

		default:
			nfe.AddError(fmt.Errorf("unexpected value in ASIdentifiers.asIdsOrRanges[%d]: %+v", i, id))
		}
	}
	return &asID
}

func parseRPKIASIdentifiers(data []byte, nfe *NonFatalErrors) (*ASIdentifiers, *ASIdentifiers) {
	// RFC 3779 2.3.2
	//   ASIdentifiers       ::= SEQUENCE {
	//       asnum               [0] EXPLICIT ASIdentifierChoice OPTIONAL,
	//       rdi                 [1] EXPLICIT ASIdentifierChoice OPTIONAL}
	var asIDs asIdentifiers
	if rest, err := asn1.Unmarshal(data, &asIDs); err != nil {
		nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers extension: %v", err))
		return nil, nil
	} else if len(rest) != 0 {
		nfe.AddError(errors.New("trailing data after ASIdentifiers extension"))
		return nil, nil
	}
	return parseASIDChoice(asIDs.ASNum, nfe), parseASIDChoice(asIDs.RDI, nfe)
}
5  vendor/github.com/google/certificate-transparency-go/x509/sec1.go  generated  vendored
@ -72,11 +72,12 @@ func parseECPrivateKey(namedCurveOID *asn1.ObjectIdentifier, der []byte) (key *e
 		return nil, fmt.Errorf("x509: unknown EC private key version %d", privKey.Version)
 	}
 
+	var nfe NonFatalErrors
 	var curve elliptic.Curve
 	if namedCurveOID != nil {
-		curve = namedCurveFromOID(*namedCurveOID)
+		curve = namedCurveFromOID(*namedCurveOID, &nfe)
 	} else {
-		curve = namedCurveFromOID(privKey.NamedCurveOID)
+		curve = namedCurveFromOID(privKey.NamedCurveOID, &nfe)
 	}
 	if curve == nil {
 		return nil, errors.New("x509: unknown elliptic curve")
119  vendor/github.com/google/certificate-transparency-go/x509/verify.go  generated  vendored
@ -12,9 +12,12 @@ import (
 	"net/url"
 	"reflect"
 	"runtime"
+	"strconv"
 	"strings"
 	"time"
 	"unicode/utf8"
+
+	"github.com/google/certificate-transparency-go/asn1"
 )
 
 type InvalidReason int
@ -174,19 +177,29 @@ var errNotParsed = errors.New("x509: missing ASN.1 contents; use ParseCertificat
 // VerifyOptions contains parameters for Certificate.Verify. It's a structure
 // because other PKIX verification APIs have ended up needing many options.
 type VerifyOptions struct {
 	DNSName       string
 	Intermediates *CertPool
 	Roots         *CertPool // if nil, the system roots are used
 	CurrentTime   time.Time // if zero, the current time is used
-	DisableTimeChecks bool
-	// KeyUsage specifies which Extended Key Usage values are acceptable.
-	// An empty list means ExtKeyUsageServerAuth. Key usage is considered a
-	// constraint down the chain which mirrors Windows CryptoAPI behavior,
-	// but not the spec. To accept any key usage, include ExtKeyUsageAny.
+	// Options to disable various verification checks.
+	DisableTimeChecks              bool
+	DisableCriticalExtensionChecks bool
+	DisableNameChecks              bool
+	DisableEKUChecks               bool
+	DisablePathLenChecks           bool
+	DisableNameConstraintChecks    bool
+	// KeyUsage specifies which Extended Key Usage values are acceptable. A leaf
+	// certificate is accepted if it contains any of the listed values. An empty
+	// list means ExtKeyUsageServerAuth. To accept any key usage, include
+	// ExtKeyUsageAny.
+	//
+	// Certificate chains are required to nest extended key usage values,
+	// irrespective of this value. This matches the Windows CryptoAPI behavior,
+	// but not the spec.
 	KeyUsages []ExtKeyUsage
 	// MaxConstraintComparisions is the maximum number of comparisons to
 	// perform when checking a given certificate's name constraints. If
-	// zero, a sensible default is used. This limit prevents pathalogical
+	// zero, a sensible default is used. This limit prevents pathological
 	// certificates from consuming excessive amounts of CPU time when
 	// validating.
 	MaxConstraintComparisions int
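A sketch of how a caller might use the new Disable* switches, under the assumption that cert and roots come from the forked x509 package shown here; verifyLaxly is an illustrative name, not part of the vendored code.

package certutil

import (
	"time"

	"github.com/google/certificate-transparency-go/x509"
)

// verifyLaxly builds chains for cert against the given roots while switching
// off the expiry and EKU checks via the new Disable* options; the field names
// are taken from the VerifyOptions definition above.
func verifyLaxly(cert *x509.Certificate, roots *x509.CertPool) ([][]*x509.Certificate, error) {
	opts := x509.VerifyOptions{
		Roots:             roots,
		CurrentTime:       time.Now(),
		DisableTimeChecks: true,
		DisableEKUChecks:  true,
	}
	return cert.Verify(opts)
}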
@ -544,11 +557,16 @@ func (c *Certificate) checkNameConstraints(count *int,
 	return nil
 }
 
+const (
+	checkingAgainstIssuerCert = iota
+	checkingAgainstLeafCert
+)
+
 // ekuPermittedBy returns true iff the given extended key usage is permitted by
 // the given EKU from a certificate. Normally, this would be a simple
 // comparison plus a special case for the “any” EKU. But, in order to support
 // existing certificates, some exceptions are made.
-func ekuPermittedBy(eku, certEKU ExtKeyUsage) bool {
+func ekuPermittedBy(eku, certEKU ExtKeyUsage, context int) bool {
 	if certEKU == ExtKeyUsageAny || eku == certEKU {
 		return true
 	}
@ -565,28 +583,33 @@ func ekuPermittedBy(eku, certEKU ExtKeyUsage) bool {
 	eku = mapServerAuthEKUs(eku)
 	certEKU = mapServerAuthEKUs(certEKU)
 
-	if eku == certEKU ||
-		// ServerAuth in a CA permits ClientAuth in the leaf.
-		(eku == ExtKeyUsageClientAuth && certEKU == ExtKeyUsageServerAuth) ||
+	if eku == certEKU {
+		return true
+	}
+
+	// If checking a requested EKU against the list in a leaf certificate there
+	// are fewer exceptions.
+	if context == checkingAgainstLeafCert {
+		return false
+	}
+
+	// ServerAuth in a CA permits ClientAuth in the leaf.
+	return (eku == ExtKeyUsageClientAuth && certEKU == ExtKeyUsageServerAuth) ||
 		// Any CA may issue an OCSP responder certificate.
 		eku == ExtKeyUsageOCSPSigning ||
 		// Code-signing CAs can use Microsoft's commercial and
 		// kernel-mode EKUs.
-		((eku == ExtKeyUsageMicrosoftCommercialCodeSigning || eku == ExtKeyUsageMicrosoftKernelCodeSigning) && certEKU == ExtKeyUsageCodeSigning) {
-		return true
-	}
-
-	return false
+		(eku == ExtKeyUsageMicrosoftCommercialCodeSigning || eku == ExtKeyUsageMicrosoftKernelCodeSigning) && certEKU == ExtKeyUsageCodeSigning
 }
 
 // isValid performs validity checks on c given that it is a candidate to append
 // to the chain in currentChain.
 func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error {
-	if len(c.UnhandledCriticalExtensions) > 0 {
+	if !opts.DisableCriticalExtensionChecks && len(c.UnhandledCriticalExtensions) > 0 {
 		return UnhandledCriticalExtension{ID: c.UnhandledCriticalExtensions[0]}
 	}
 
-	if len(currentChain) > 0 {
+	if !opts.DisableNameChecks && len(currentChain) > 0 {
 		child := currentChain[len(currentChain)-1]
 		if !bytes.Equal(child.RawIssuer, c.RawSubject) {
 			return CertificateInvalidError{c, NameMismatch, ""}
@ -617,7 +640,7 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
 		leaf = currentChain[0]
 	}
 
-	if (certType == intermediateCertificate || certType == rootCertificate) && c.hasNameConstraints() {
+	if !opts.DisableNameConstraintChecks && (certType == intermediateCertificate || certType == rootCertificate) && c.hasNameConstraints() {
 		sanExtension, ok := leaf.getSANExtension()
 		if !ok {
 			// This is the deprecated, legacy case of depending on
@ -633,8 +656,7 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
 				name := string(data)
 				mailbox, ok := parseRFC2821Mailbox(name)
 				if !ok {
-					// This certificate should not have parsed.
-					return errors.New("x509: internal error: rfc822Name SAN failed to parse")
+					return fmt.Errorf("x509: cannot parse rfc822Name %q", mailbox)
 				}
 
 				if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox,
@ -646,6 +668,10 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
 
 			case nameTypeDNS:
 				name := string(data)
+				if _, ok := domainToReverseLabels(name); !ok {
+					return fmt.Errorf("x509: cannot parse dnsName %q", name)
+				}
+
 				if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name,
 					func(parsedName, constraint interface{}) (bool, error) {
 						return matchDomainConstraint(parsedName.(string), constraint.(string))
@ -692,7 +718,7 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
 		}
 	}
 
-	checkEKUs := certType == intermediateCertificate
+	checkEKUs := !opts.DisableEKUChecks && certType == intermediateCertificate
 
 	// If no extended key usages are specified, then all are acceptable.
 	if checkEKUs && (len(c.ExtKeyUsage) == 0 && len(c.UnknownExtKeyUsage) == 0) {
@ -719,7 +745,7 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
 
 	for _, caEKU := range c.ExtKeyUsage {
 		comparisonCount++
-		if ekuPermittedBy(eku, caEKU) {
+		if ekuPermittedBy(eku, caEKU, checkingAgainstIssuerCert) {
 			continue NextEKU
 		}
 	}
@ -766,7 +792,7 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
 		return CertificateInvalidError{c, NotAuthorizedToSign, ""}
 	}
 
-	if c.BasicConstraintsValid && c.MaxPathLen >= 0 {
+	if !opts.DisablePathLenChecks && c.BasicConstraintsValid && c.MaxPathLen >= 0 {
 		numIntermediates := len(currentChain) - 1
 		if numIntermediates > c.MaxPathLen {
 			return CertificateInvalidError{c, TooManyIntermediates, ""}
@ -776,6 +802,18 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
 	return nil
 }
 
+// formatOID formats an ASN.1 OBJECT IDENTIFER in the common, dotted style.
+func formatOID(oid asn1.ObjectIdentifier) string {
+	ret := ""
+	for i, v := range oid {
+		if i > 0 {
+			ret += "."
+		}
+		ret += strconv.Itoa(v)
+	}
+	return ret
+}
+
 // Verify attempts to verify c by building one or more chains from c to a
 // certificate in opts.Roots, using certificates in opts.Intermediates if
 // needed. If successful, it returns one or more chains where the first
@ -840,7 +878,7 @@ func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err e
 	}
 
 	// If no key usages are specified, then any are acceptable.
-	checkEKU := len(c.ExtKeyUsage) > 0
+	checkEKU := !opts.DisableEKUChecks && len(c.ExtKeyUsage) > 0
 
 	for _, eku := range requestedKeyUsages {
 		if eku == ExtKeyUsageAny {
@ -850,16 +888,33 @@ func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err e
 	}
 
 	if checkEKU {
+		foundMatch := false
 	NextUsage:
 		for _, eku := range requestedKeyUsages {
 			for _, leafEKU := range c.ExtKeyUsage {
-				if ekuPermittedBy(eku, leafEKU) {
-					continue NextUsage
+				if ekuPermittedBy(eku, leafEKU, checkingAgainstLeafCert) {
+					foundMatch = true
+					break NextUsage
 				}
 			}
 		}
 
-		oid, _ := oidFromExtKeyUsage(eku)
-		return nil, CertificateInvalidError{c, IncompatibleUsage, fmt.Sprintf("%#v", oid)}
+		if !foundMatch {
+			msg := "leaf contains the following, recognized EKUs: "
+
+			for i, leafEKU := range c.ExtKeyUsage {
+				oid, ok := oidFromExtKeyUsage(leafEKU)
+				if !ok {
+					continue
+				}
+
+				if i > 0 {
+					msg += ", "
+				}
+				msg += formatOID(oid)
+			}
+
+			return nil, CertificateInvalidError{c, IncompatibleUsage, msg}
+		}
 	}
 }
 
345  vendor/github.com/google/certificate-transparency-go/x509/x509.go  generated  vendored
@ -8,9 +8,39 @@
 // can be used to override the system default locations for the SSL certificate
 // file and SSL certificate files directory, respectively.
 //
-// This is a fork of the go library crypto/x509 package, it's more relaxed
-// about certificates that it'll accept, and exports the TBSCertificate
-// structure.
+// This is a fork of the Go library crypto/x509 package, primarily adapted for
+// use with Certificate Transparency.  Main areas of difference are:
+//
+//  - Life as a fork:
+//     - Rename OS-specific cgo code so it doesn't clash with main Go library.
+//     - Use local library imports (asn1, pkix) throughout.
+//     - Add version-specific wrappers for Go version-incompatible code (in
+//       nilref_*_darwin.go, ptr_*_windows.go).
+//  - Laxer certificate parsing:
+//     - Add options to disable various validation checks (times, EKUs etc).
+//     - Use NonFatalErrors type for some errors and continue parsing; this
+//       can be checked with IsFatal(err).
+//     - Support for short bitlength ECDSA curves (in curves.go).
+//  - Certificate Transparency specific function:
+//     - Parsing and marshaling of SCTList extension.
+//     - RemoveSCTList() function for rebuilding CT leaf entry.
+//     - Pre-certificate processing (RemoveCTPoison(), BuildPrecertTBS(),
+//       ParseTBSCertificate(), IsPrecertificate()).
+//  - Revocation list processing:
+//     - Detailed CRL parsing (in revoked.go)
+//     - Detailed error recording mechanism (in error.go, errors.go)
+//     - Factor out parseDistributionPoints() for reuse.
+//     - Factor out and generalize GeneralNames parsing (in names.go)
+//     - Fix CRL commenting.
+//  - RPKI support:
+//     - Support for SubjectInfoAccess extension
+//     - Support for RFC3779 extensions (in rpki.go)
+//  - General improvements:
+//     - Export and use OID values throughout.
+//     - Export OIDFromNamedCurve().
+//     - Export SignatureAlgorithmFromAI().
+//     - Add OID value to UnhandledCriticalExtension error.
+//     - Minor typo/lint fixes.
 package x509
 
 import (
@ -69,7 +99,16 @@ func ParsePKIXPublicKey(derBytes []byte) (pub interface{}, err error) {
 	if algo == UnknownPublicKeyAlgorithm {
 		return nil, errors.New("x509: unknown public key algorithm")
 	}
-	return parsePublicKey(algo, &pki)
+	var nfe NonFatalErrors
+	pub, err = parsePublicKey(algo, &pki, &nfe)
+	if err != nil {
+		return pub, err
+	}
+	// Treat non-fatal errors as fatal for this entrypoint.
+	if len(nfe.Errors) > 0 {
+		return nil, nfe.Errors[0]
+	}
+	return pub, nil
 }
 
 func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, publicKeyAlgorithm pkix.AlgorithmIdentifier, err error) {
@ -500,15 +539,21 @@ func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm
 // secp521r1 OBJECT IDENTIFIER ::= {
 //   iso(1) identified-organization(3) certicom(132) curve(0) 35 }
 //
-// NB: secp256r1 is equivalent to prime256v1
+// secp192r1 OBJECT IDENTIFIER ::= {
+//   iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3)
+//   prime(1) 1 }
+//
+// NB: secp256r1 is equivalent to prime256v1,
+// secp192r1 is equivalent to ansix9p192r and prime192v1
 var (
 	OIDNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33}
 	OIDNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7}
 	OIDNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34}
 	OIDNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35}
+	OIDNamedCurveP192 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 1}
 )
 
-func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve {
+func namedCurveFromOID(oid asn1.ObjectIdentifier, nfe *NonFatalErrors) elliptic.Curve {
 	switch {
 	case oid.Equal(OIDNamedCurveP224):
 		return elliptic.P224()
@ -518,6 +563,9 @@ func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve {
 		return elliptic.P384()
 	case oid.Equal(OIDNamedCurveP521):
 		return elliptic.P521()
+	case oid.Equal(OIDNamedCurveP192):
+		nfe.AddError(errors.New("insecure curve (secp192r1) specified"))
+		return secp192r1()
 	}
 	return nil
 }
@ -534,6 +582,8 @@ func OIDFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) {
 		return OIDNamedCurveP384, true
 	case elliptic.P521():
 		return OIDNamedCurveP521, true
+	case secp192r1():
+		return OIDNamedCurveP192, true
 	}
 
 	return nil, false
@ -737,7 +787,13 @@ type Certificate struct {
 	OCSPServer            []string
 	IssuingCertificateURL []string
 
-	// Subject Alternate Name values
+	// Subject Information Access
+	SubjectTimestamps     []string
+	SubjectCARepositories []string
+
+	// Subject Alternate Name values. (Note that these values may not be valid
+	// if invalid values were contained within a parsed certificate. For
+	// example, an element of DNSNames may not be a valid DNS domain name.)
 	DNSNames       []string
 	EmailAddresses []string
 	IPAddresses    []net.IP
@ -759,6 +815,9 @@ type Certificate struct {
 
 	PolicyIdentifiers []asn1.ObjectIdentifier
 
+	RPKIAddressRanges                   []*IPAddressFamilyBlocks
+	RPKIASNumbers, RPKIRoutingDomainIDs *ASIdentifiers
+
 	// Certificate Transparency SCT extension contents; this is a TLS-encoded
 	// SignedCertificateTimestampList (RFC 6962 s3.3).
 	RawSCT []byte
@ -792,6 +851,20 @@ func (c *Certificate) Equal(other *Certificate) bool {
 	return bytes.Equal(c.Raw, other.Raw)
 }
 
+// IsPrecertificate checks whether the certificate is a precertificate, by
+// checking for the presence of the CT Poison extension.
+func (c *Certificate) IsPrecertificate() bool {
+	if c == nil {
+		return false
+	}
+	for _, ext := range c.Extensions {
+		if ext.Id.Equal(OIDExtensionCTPoison) {
+			return true
+		}
+	}
+	return false
+}
+
 func (c *Certificate) hasSANExtension() bool {
 	return oidInExtensions(OIDExtensionSubjectAltName, c.Extensions)
 }
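A minimal sketch of checking a DER blob for the CT poison extension with the new helper; isPrecert is an assumed wrapper name and the import path is the vendored fork shown above.

package certutil

import (
	"github.com/google/certificate-transparency-go/x509"
)

// isPrecert reports whether the DER blob parses to a CT precertificate, i.e.
// a certificate carrying the poison extension, using IsPrecertificate.
func isPrecert(der []byte) bool {
	cert, err := x509.ParseCertificate(der)
	if x509.IsFatal(err) {
		return false
	}
	return cert.IsPrecertificate()
}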
@ -995,6 +1068,50 @@ func (h UnhandledCriticalExtension) Error() string {
 	return fmt.Sprintf("x509: unhandled critical extension (%v)", h.ID)
 }
 
+// removeExtension takes a DER-encoded TBSCertificate, removes the extension
+// specified by oid (preserving the order of other extensions), and returns the
+// result still as a DER-encoded TBSCertificate. This function will fail if
+// there is not exactly 1 extension of the type specified by the oid present.
+func removeExtension(tbsData []byte, oid asn1.ObjectIdentifier) ([]byte, error) {
+	var tbs tbsCertificate
+	rest, err := asn1.Unmarshal(tbsData, &tbs)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse TBSCertificate: %v", err)
+	} else if rLen := len(rest); rLen > 0 {
+		return nil, fmt.Errorf("trailing data (%d bytes) after TBSCertificate", rLen)
+	}
+	extAt := -1
+	for i, ext := range tbs.Extensions {
+		if ext.Id.Equal(oid) {
+			if extAt != -1 {
+				return nil, errors.New("multiple extensions of specified type present")
+			}
+			extAt = i
+		}
+	}
+	if extAt == -1 {
+		return nil, errors.New("no extension of specified type present")
+	}
+	tbs.Extensions = append(tbs.Extensions[:extAt], tbs.Extensions[extAt+1:]...)
+	// Clear out the asn1.RawContent so the re-marshal operation sees the
+	// updated structure (rather than just copying the out-of-date DER data).
+	tbs.Raw = nil
+
+	data, err := asn1.Marshal(tbs)
+	if err != nil {
+		return nil, fmt.Errorf("failed to re-marshal TBSCertificate: %v", err)
+	}
+	return data, nil
+}
+
+// RemoveSCTList takes a DER-encoded TBSCertificate and removes the CT SCT
+// extension that contains the SCT list (preserving the order of other
+// extensions), and returns the result still as a DER-encoded TBSCertificate.
+// This function will fail if there is not exactly 1 CT SCT extension present.
+func RemoveSCTList(tbsData []byte) ([]byte, error) {
+	return removeExtension(tbsData, OIDExtensionCTSCT)
+}
+
 // RemoveCTPoison takes a DER-encoded TBSCertificate and removes the CT poison
 // extension (preserving the order of other extensions), and returns the result
 // still as a DER-encoded TBSCertificate. This function will fail if there is
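A hedged sketch of rebuilding the TBSCertificate that a CT log signed, assuming the certificate was parsed with this forked x509 package and that its RawTBSCertificate field holds the DER-encoded TBS (as in the upstream crypto/x509 type); leafTBS is an illustrative name, not part of the vendored code.

package certutil

import (
	"github.com/google/certificate-transparency-go/x509"
)

// leafTBS strips the SCT-list extension from a final certificate's
// TBSCertificate so the result matches what the log signed over.
func leafTBS(cert *x509.Certificate) ([]byte, error) {
	return x509.RemoveSCTList(cert.RawTBSCertificate)
}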
@ -1019,27 +1136,18 @@ func RemoveCTPoison(tbsData []byte) ([]byte, error) {
 // - The precert's AuthorityKeyId is changed to the AuthorityKeyId of the
 //   intermediate.
 func BuildPrecertTBS(tbsData []byte, preIssuer *Certificate) ([]byte, error) {
+	data, err := removeExtension(tbsData, OIDExtensionCTPoison)
+	if err != nil {
+		return nil, err
+	}
+
 	var tbs tbsCertificate
-	rest, err := asn1.Unmarshal(tbsData, &tbs)
+	rest, err := asn1.Unmarshal(data, &tbs)
 	if err != nil {
 		return nil, fmt.Errorf("failed to parse TBSCertificate: %v", err)
 	} else if rLen := len(rest); rLen > 0 {
 		return nil, fmt.Errorf("trailing data (%d bytes) after TBSCertificate", rLen)
 	}
-	poisonAt := -1
-	for i, ext := range tbs.Extensions {
-		if ext.Id.Equal(OIDExtensionCTPoison) {
-			if poisonAt != -1 {
-				return nil, errors.New("multiple CT poison extensions present")
-			}
-			poisonAt = i
-		}
-	}
-	if poisonAt == -1 {
-		return nil, errors.New("no CT poison extension present")
-	}
-	tbs.Extensions = append(tbs.Extensions[:poisonAt], tbs.Extensions[poisonAt+1:]...)
-	tbs.Raw = nil
 
 	if preIssuer != nil {
 		// Update the precert's Issuer field.  Use the RawIssuer rather than the
@ -1092,9 +1200,13 @@ func BuildPrecertTBS(tbsData []byte, preIssuer *Certificate) ([]byte, error) {
 			}
 			tbs.Extensions = append(tbs.Extensions, authKeyIDExt)
 		}
+
+		// Clear out the asn1.RawContent so the re-marshal operation sees the
+		// updated structure (rather than just copying the out-of-date DER data).
+		tbs.Raw = nil
 	}
 
-	data, err := asn1.Marshal(tbs)
+	data, err = asn1.Marshal(tbs)
 	if err != nil {
 		return nil, fmt.Errorf("failed to re-marshal TBSCertificate: %v", err)
 	}
@@ -1120,7 +1232,7 @@ const (
 )
 
 // RFC 5280, 4.2.2.1
-type authorityInfoAccess struct {
+type accessDescription struct {
 	Method asn1.ObjectIdentifier
 	Location asn1.RawValue
 }
@@ -1137,14 +1249,14 @@ type distributionPointName struct {
 	RelativeName pkix.RDNSequence `asn1:"optional,tag:1"`
 }
 
-func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{}, error) {
+func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo, nfe *NonFatalErrors) (interface{}, error) {
 	asn1Data := keyData.PublicKey.RightAlign()
 	switch algo {
 	case RSA:
 		// RSA public keys must have a NULL in the parameters
 		// (https://tools.ietf.org/html/rfc3279#section-2.3.1).
 		if !bytes.Equal(keyData.Algorithm.Parameters.FullBytes, asn1.NullBytes) {
-			return nil, errors.New("x509: RSA key missing NULL parameters")
+			nfe.AddError(errors.New("x509: RSA key missing NULL parameters"))
 		}
 
 		p := new(pkcs1PublicKey)
@@ -1208,9 +1320,9 @@ func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{
 		if len(rest) != 0 {
 			return nil, errors.New("x509: trailing data after ECDSA parameters")
 		}
-		namedCurve := namedCurveFromOID(*namedCurveOID)
+		namedCurve := namedCurveFromOID(*namedCurveOID, nfe)
 		if namedCurve == nil {
-			return nil, errors.New("x509: unsupported elliptic curve")
+			return nil, fmt.Errorf("x509: unsupported elliptic curve %v", namedCurveOID)
 		}
 		x, y := elliptic.Unmarshal(namedCurve, asn1Data)
 		if x == nil {
@@ -1235,7 +1347,7 @@ type NonFatalErrors struct {
 	Errors []error
 }
 
-// Adds an error to the list of errors contained by NonFatalErrors.
+// AddError adds an error to the list of errors contained by NonFatalErrors.
 func (e *NonFatalErrors) AddError(err error) {
 	e.Errors = append(e.Errors, err)
 }
@@ -1250,11 +1362,25 @@ func (e NonFatalErrors) Error() string {
 	return r
 }
 
-// Returns true if |e| contains at least one error
+// HasError returns true if |e| contains at least one error
 func (e *NonFatalErrors) HasError() bool {
 	return len(e.Errors) > 0
 }
 
+// IsFatal indicates whether an error is fatal.
+func IsFatal(err error) bool {
+	if err == nil {
+		return false
+	}
+	if _, ok := err.(NonFatalErrors); ok {
+		return false
+	}
+	if errs, ok := err.(*Errors); ok {
+		return errs.Fatal()
+	}
+	return true
+}
+
 func parseDistributionPoints(data []byte, crldp *[]string) error {
 	// CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint
 	//
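HasError and the new IsFatal helper give callers a way to distinguish certificates that parsed with warnings from ones that did not parse at all. A minimal sketch of that caller-side pattern (illustrative only; it assumes the fork is imported as x509 and that cert.der is a caller-supplied DER file):

package main

import (
	"io/ioutil"
	"log"

	"github.com/google/certificate-transparency-go/x509"
)

func main() {
	der, err := ioutil.ReadFile("cert.der") // illustrative input file
	if err != nil {
		log.Fatal(err)
	}

	cert, err := x509.ParseCertificate(der)
	if x509.IsFatal(err) {
		// Fatal: no usable *Certificate was produced.
		log.Fatalf("cannot parse certificate: %v", err)
	}
	if err != nil {
		// Non-fatal: cert is populated and err is of type x509.NonFatalErrors.
		log.Printf("certificate parsed with warnings: %v", err)
	}
	log.Printf("subject: %v", cert.Subject)
}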
@@ -1337,17 +1463,9 @@ func parseSANExtension(value []byte, nfe *NonFatalErrors) (dnsNames, emailAddres
 	err = forEachSAN(value, func(tag int, data []byte) error {
 		switch tag {
 		case nameTypeEmail:
-			mailbox := string(data)
-			if _, ok := parseRFC2821Mailbox(mailbox); !ok {
-				return fmt.Errorf("x509: cannot parse rfc822Name %q", mailbox)
-			}
-			emailAddresses = append(emailAddresses, mailbox)
+			emailAddresses = append(emailAddresses, string(data))
 		case nameTypeDNS:
-			domain := string(data)
-			if _, ok := domainToReverseLabels(domain); !ok {
-				return fmt.Errorf("x509: cannot parse dnsName %q", string(data))
-			}
-			dnsNames = append(dnsNames, domain)
+			dnsNames = append(dnsNames, string(data))
 		case nameTypeURI:
 			uri, err := url.Parse(string(data))
 			if err != nil {
@@ -1364,7 +1482,7 @@ func parseSANExtension(value []byte, nfe *NonFatalErrors) (dnsNames, emailAddres
 		case net.IPv4len, net.IPv6len:
 			ipAddresses = append(ipAddresses, data)
 		default:
-			nfe.AddError(fmt.Errorf("x509: certificate contained IP address of length %d : %v", len(data), data))
+			nfe.AddError(errors.New("x509: cannot parse IP address of length " + strconv.Itoa(len(data))))
 		}
 	}
 
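With the change above, malformed rfc822Name and dnsName SAN values are kept rather than rejected, so strictness moves to the caller. A rough sketch of caller-side screening (illustrative; the checks shown are deliberately simplistic placeholders, not the library's validation):

package sancheck

import (
	"log"
	"strings"

	"github.com/google/certificate-transparency-go/x509"
)

// warnOnOddSANs applies a very rough sanity check to the SAN values that the
// parser now passes through untouched.
func warnOnOddSANs(cert *x509.Certificate) {
	for _, email := range cert.EmailAddresses {
		if !strings.Contains(email, "@") {
			log.Printf("suspicious rfc822Name SAN: %q", email)
		}
	}
	for _, name := range cert.DNSNames {
		if name == "" || strings.ContainsAny(name, " ") {
			log.Printf("suspicious dnsName SAN: %q", name)
		}
	}
}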
@@ -1399,7 +1517,7 @@ func isValidIPMask(mask []byte) bool {
 	return true
 }
 
-func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandled bool, err error) {
+func parseNameConstraintsExtension(out *Certificate, e pkix.Extension, nfe *NonFatalErrors) (unhandled bool, err error) {
 	// RFC 5280, 4.2.1.10
 
 	// NameConstraints ::= SEQUENCE {
@@ -1466,7 +1584,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle
 				trimmedDomain = trimmedDomain[1:]
 			}
 			if _, ok := domainToReverseLabels(trimmedDomain); !ok {
-				return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse dnsName constraint %q", domain)
+				nfe.AddError(fmt.Errorf("x509: failed to parse dnsName constraint %q", domain))
 			}
 			dnsNames = append(dnsNames, domain)
 
@@ -1503,7 +1621,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle
 			// it specifies an exact mailbox name.
 			if strings.Contains(constraint, "@") {
 				if _, ok := parseRFC2821Mailbox(constraint); !ok {
-					return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint)
+					nfe.AddError(fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint))
 				}
 			} else {
 				// Otherwise it's a domain name.
@@ -1512,7 +1630,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle
 					domain = domain[1:]
 				}
 				if _, ok := domainToReverseLabels(domain); !ok {
-					return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint)
+					nfe.AddError(fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint))
 				}
 			}
 			emails = append(emails, constraint)
@@ -1536,7 +1654,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle
 				trimmedDomain = trimmedDomain[1:]
 			}
 			if _, ok := domainToReverseLabels(trimmedDomain); !ok {
-				return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q", domain)
+				nfe.AddError(fmt.Errorf("x509: failed to parse URI constraint %q", domain))
 			}
 			uriDomains = append(uriDomains, domain)
 
@@ -1575,7 +1693,7 @@ func parseCertificate(in *certificate) (*Certificate, error) {
 	out.PublicKeyAlgorithm =
 		getPublicKeyAlgorithmFromOID(in.TBSCertificate.PublicKey.Algorithm.Algorithm)
 	var err error
-	out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey)
+	out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey, &nfe)
 	if err != nil {
 		return nil, err
 	}
@@ -1651,7 +1769,7 @@ func parseCertificate(in *certificate) (*Certificate, error) {
 			}
 
 		case OIDExtensionNameConstraints[3]:
-			unhandled, err = parseNameConstraintsExtension(out, e)
+			unhandled, err = parseNameConstraintsExtension(out, e, &nfe)
 			if err != nil {
 				return nil, err
 			}
@@ -1682,10 +1800,14 @@ func parseCertificate(in *certificate) (*Certificate, error) {
 			// KeyPurposeId ::= OBJECT IDENTIFIER
 
 			var keyUsage []asn1.ObjectIdentifier
-			if rest, err := asn1.Unmarshal(e.Value, &keyUsage); err != nil {
-				return nil, err
-			} else if len(rest) != 0 {
-				return nil, errors.New("x509: trailing data after X.509 ExtendedKeyUsage")
+			if len(e.Value) == 0 {
+				nfe.AddError(errors.New("x509: empty ExtendedKeyUsage"))
+			} else {
+				if rest, err := asn1.Unmarshal(e.Value, &keyUsage); err != nil {
+					return nil, err
+				} else if len(rest) != 0 {
+					return nil, errors.New("x509: trailing data after X.509 ExtendedKeyUsage")
+				}
 			}
 
 			for _, u := range keyUsage {
@@ -1725,12 +1847,15 @@ func parseCertificate(in *certificate) (*Certificate, error) {
 			}
 		} else if e.Id.Equal(OIDExtensionAuthorityInfoAccess) {
 			// RFC 5280 4.2.2.1: Authority Information Access
-			var aia []authorityInfoAccess
+			var aia []accessDescription
 			if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil {
 				return nil, err
 			} else if len(rest) != 0 {
 				return nil, errors.New("x509: trailing data after X.509 authority information")
 			}
+			if len(aia) == 0 {
+				nfe.AddError(errors.New("x509: empty AuthorityInfoAccess extension"))
+			}
 
 			for _, v := range aia {
 				// GeneralName: uniformResourceIdentifier [6] IA5String
@@ -1743,6 +1868,34 @@ func parseCertificate(in *certificate) (*Certificate, error) {
 					out.IssuingCertificateURL = append(out.IssuingCertificateURL, string(v.Location.Bytes))
 				}
 			}
+		} else if e.Id.Equal(OIDExtensionSubjectInfoAccess) {
+			// RFC 5280 4.2.2.2: Subject Information Access
+			var sia []accessDescription
+			if rest, err := asn1.Unmarshal(e.Value, &sia); err != nil {
+				return nil, err
+			} else if len(rest) != 0 {
+				return nil, errors.New("x509: trailing data after X.509 subject information")
+			}
+			if len(sia) == 0 {
+				nfe.AddError(errors.New("x509: empty SubjectInfoAccess extension"))
+			}
+
+			for _, v := range sia {
+				// TODO(drysdale): cope with non-URI types of GeneralName
+				// GeneralName: uniformResourceIdentifier [6] IA5String
+				if v.Location.Tag != 6 {
+					continue
+				}
+				if v.Method.Equal(OIDSubjectInfoAccessTimestamp) {
+					out.SubjectTimestamps = append(out.SubjectTimestamps, string(v.Location.Bytes))
+				} else if v.Method.Equal(OIDSubjectInfoAccessCARepo) {
+					out.SubjectCARepositories = append(out.SubjectCARepositories, string(v.Location.Bytes))
+				}
+			}
+		} else if e.Id.Equal(OIDExtensionIPPrefixList) {
+			out.RPKIAddressRanges = parseRPKIAddrBlocks(e.Value, &nfe)
+		} else if e.Id.Equal(OIDExtensionASList) {
+			out.RPKIASNumbers, out.RPKIRoutingDomainIDs = parseRPKIASIdentifiers(e.Value, &nfe)
 		} else if e.Id.Equal(OIDExtensionCTSCT) {
 			if rest, err := asn1.Unmarshal(e.Value, &out.RawSCT); err != nil {
 				nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal SCT list extension: %v", err))
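The new SubjectInfoAccess handling above surfaces timestamping and CA-repository URLs on the parsed certificate. A small sketch of reading those fields (assumptions: the fork is imported as x509 from github.com/google/certificate-transparency-go/x509; der is a DER-encoded certificate supplied by the caller):

package siainfo

import (
	"fmt"

	"github.com/google/certificate-transparency-go/x509"
)

// describeSIA lists the Subject Information Access URLs that the parser now
// extracts into dedicated fields.
func describeSIA(der []byte) error {
	cert, err := x509.ParseCertificate(der)
	if x509.IsFatal(err) {
		return err
	}
	for _, ts := range cert.SubjectTimestamps {
		fmt.Printf("timestamping URL: %s\n", ts)
	}
	for _, repo := range cert.SubjectCARepositories {
		fmt.Printf("CA repository URL: %s\n", repo)
	}
	return nil
}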
@@ -1787,6 +1940,8 @@ func ParseTBSCertificate(asn1Data []byte) (*Certificate, error) {
 }
 
 // ParseCertificate parses a single certificate from the given ASN.1 DER data.
+// This function can return both a Certificate and an error (in which case the
+// error will be of type NonFatalErrors).
 func ParseCertificate(asn1Data []byte) (*Certificate, error) {
 	var cert certificate
 	rest, err := asn1.Unmarshal(asn1Data, &cert)
@@ -1802,6 +1957,8 @@ func ParseCertificate(asn1Data []byte) (*Certificate, error) {
 
 // ParseCertificates parses one or more certificates from the given ASN.1 DER
 // data. The certificates must be concatenated with no intermediate padding.
+// This function can return both a slice of Certificate and an error (in which
+// case the error will be of type NonFatalErrors).
 func ParseCertificates(asn1Data []byte) ([]*Certificate, error) {
 	var v []*certificate
 
@@ -1815,15 +1972,23 @@ func ParseCertificates(asn1Data []byte) ([]*Certificate, error) {
 		v = append(v, cert)
 	}
 
+	var nfe NonFatalErrors
 	ret := make([]*Certificate, len(v))
 	for i, ci := range v {
 		cert, err := parseCertificate(ci)
 		if err != nil {
-			return nil, err
+			if errs, ok := err.(NonFatalErrors); !ok {
+				return nil, err
+			} else {
+				nfe.Errors = append(nfe.Errors, errs.Errors...)
+			}
 		}
 		ret[i] = cert
 	}
 
+	if nfe.HasError() {
+		return ret, nfe
+	}
 	return ret, nil
 }
 
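ParseCertificates now aggregates per-certificate NonFatalErrors and still returns the parsed chain. A caller-side sketch (illustrative; it assumes chainDER holds several DER certificates concatenated without padding and that the fork is imported as x509):

package chainparse

import (
	"log"

	"github.com/google/certificate-transparency-go/x509"
)

// parseChain returns every certificate that could be parsed, logging any
// accumulated non-fatal issues rather than failing outright.
func parseChain(chainDER []byte) ([]*x509.Certificate, error) {
	certs, err := x509.ParseCertificates(chainDER)
	if err != nil {
		if nfe, ok := err.(x509.NonFatalErrors); ok {
			log.Printf("chain parsed with %d warning(s): %v", len(nfe.Errors), nfe)
			return certs, nil
		}
		return nil, err // fatal: nothing usable was returned
	}
	return certs, nil
}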
@@ -1875,15 +2040,23 @@ var (
 
 	OIDExtensionAuthorityInfoAccess = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 1}
 	OIDExtensionSubjectInfoAccess = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 11}
 
 	// OIDExtensionCTPoison is defined in RFC 6962 s3.1.
 	OIDExtensionCTPoison = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3}
 	// OIDExtensionCTSCT is defined in RFC 6962 s3.3.
 	OIDExtensionCTSCT = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
+	// OIDExtensionIPPrefixList is defined in RFC 3779 s2.
+	OIDExtensionIPPrefixList = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 7}
+	// OIDExtensionASList is defined in RFC 3779 s3.
+	OIDExtensionASList = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 8}
 )
 
 var (
 	OIDAuthorityInfoAccessOCSP = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1}
 	OIDAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2}
+	OIDSubjectInfoAccessTimestamp = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 3}
+	OIDSubjectInfoAccessCARepo = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 5}
+	OIDAnyPolicy = asn1.ObjectIdentifier{2, 5, 29, 32, 0}
 )
 
 // oidInExtensions returns whether an extension with the given oid exists in
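The new OIDs are exported, so they can also be matched against a certificate's raw extension list. A short sketch (illustrative; it assumes ext.Id and the exported OIDs share the fork's asn1.ObjectIdentifier type, as the parsing code above does):

package oidscan

import (
	"github.com/google/certificate-transparency-go/x509"
)

// hasRFC3779Extensions reports whether a parsed certificate carries either of
// the RFC 3779 resource extensions that this version starts to recognise.
func hasRFC3779Extensions(cert *x509.Certificate) bool {
	for _, ext := range cert.Extensions {
		if ext.Id.Equal(x509.OIDExtensionIPPrefixList) || ext.Id.Equal(x509.OIDExtensionASList) {
			return true
		}
	}
	return false
}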
@@ -1932,7 +2105,7 @@ func isIA5String(s string) error {
 }
 
 func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId []byte) (ret []pkix.Extension, err error) {
-	ret = make([]pkix.Extension, 11 /* maximum number of elements. */)
+	ret = make([]pkix.Extension, 12 /* maximum number of elements. */)
 	n := 0
 
 	if template.KeyUsage != 0 &&
@@ -2017,15 +2190,15 @@ func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId
 	if (len(template.OCSPServer) > 0 || len(template.IssuingCertificateURL) > 0) &&
 		!oidInExtensions(OIDExtensionAuthorityInfoAccess, template.ExtraExtensions) {
 		ret[n].Id = OIDExtensionAuthorityInfoAccess
-		var aiaValues []authorityInfoAccess
+		var aiaValues []accessDescription
 		for _, name := range template.OCSPServer {
-			aiaValues = append(aiaValues, authorityInfoAccess{
+			aiaValues = append(aiaValues, accessDescription{
 				Method: OIDAuthorityInfoAccessOCSP,
 				Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)},
 			})
 		}
 		for _, name := range template.IssuingCertificateURL {
-			aiaValues = append(aiaValues, authorityInfoAccess{
+			aiaValues = append(aiaValues, accessDescription{
 				Method: OIDAuthorityInfoAccessIssuers,
 				Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)},
 			})
@@ -2037,6 +2210,29 @@ func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId
 		n++
 	}
 
+	if len(template.SubjectTimestamps) > 0 || len(template.SubjectCARepositories) > 0 &&
+		!oidInExtensions(OIDExtensionSubjectInfoAccess, template.ExtraExtensions) {
+		ret[n].Id = OIDExtensionSubjectInfoAccess
+		var siaValues []accessDescription
+		for _, ts := range template.SubjectTimestamps {
+			siaValues = append(siaValues, accessDescription{
+				Method: OIDSubjectInfoAccessTimestamp,
+				Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(ts)},
+			})
+		}
+		for _, repo := range template.SubjectCARepositories {
+			siaValues = append(siaValues, accessDescription{
+				Method: OIDSubjectInfoAccessCARepo,
+				Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(repo)},
+			})
+		}
+		ret[n].Value, err = asn1.Marshal(siaValues)
+		if err != nil {
+			return
+		}
+		n++
+	}
+
 	if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) &&
 		!oidInExtensions(OIDExtensionSubjectAltName, template.ExtraExtensions) {
 		ret[n].Id = OIDExtensionSubjectAltName
@@ -2290,12 +2486,25 @@ func signingParamsForPublicKey(pub interface{}, requestedSigAlgo SignatureAlgori
 var emptyASN1Subject = []byte{0x30, 0}
 
 // CreateCertificate creates a new X.509v3 certificate based on a template.
-// The following members of template are used: AuthorityKeyId,
-// BasicConstraintsValid, DNSNames, ExcludedDNSDomains, ExtKeyUsage,
-// IsCA, KeyUsage, MaxPathLen, MaxPathLenZero, NotAfter, NotBefore,
-// PermittedDNSDomains, PermittedDNSDomainsCritical, SerialNumber,
-// SignatureAlgorithm, Subject, SubjectKeyId, UnknownExtKeyUsage,
-// and RawSCT.
+// The following members of template are used:
+//  - SerialNumber
+//  - Subject
+//  - NotBefore, NotAfter
+//  - SignatureAlgorithm
+//  - For extensions:
+//    - KeyUsage
+//    - ExtKeyUsage
+//    - BasicConstraintsValid, IsCA, MaxPathLen, MaxPathLenZero
+//    - SubjectKeyId
+//    - AuthorityKeyId
+//    - OCSPServer, IssuingCertificateURL
+//    - SubjectTimestamps, SubjectCARepositories
+//    - DNSNames, EmailAddresses, IPAddresses, URIs
+//    - PolicyIdentifiers
+//    - ExcludedDNSDomains, ExcludedIPRanges, ExcludedEmailAddresses, ExcludedURIDomains, PermittedDNSDomainsCritical,
+//      PermittedDNSDomains, PermittedIPRanges, PermittedEmailAddresses, PermittedURIDomains
+//    - CRLDistributionPoints
+//    - RawSCT, SCTList
 //
 // The certificate is signed by parent. If parent is equal to template then the
 // certificate is self-signed. The parameter pub is the public key of the
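A minimal issuance sketch that exercises the Subject Information Access fields listed above (not from this change; it assumes CreateCertificate keeps the crypto/x509-style signature, that the fork's pkix package lives at github.com/google/certificate-transparency-go/x509/pkix, and all key material and URLs are placeholders):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"log"
	"math/big"
	"time"

	"github.com/google/certificate-transparency-go/x509"
	"github.com/google/certificate-transparency-go/x509/pkix"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	template := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "example.com"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(24 * time.Hour),
		DNSNames:     []string{"example.com"},
		// New in this version: emitted as a Subject Information Access extension.
		SubjectTimestamps:     []string{"http://tsa.example.com"},
		SubjectCARepositories: []string{"http://repo.example.com"},
	}

	// Self-signed: parent == template.
	der, err := x509.CreateCertificate(rand.Reader, template, template, &key.PublicKey, key)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("issued certificate: %d bytes", len(der))
}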
@@ -2804,10 +3013,15 @@ func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error
 	}
 
 	var err error
-	out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCSR.PublicKey)
+	var nfe NonFatalErrors
+	out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCSR.PublicKey, &nfe)
 	if err != nil {
 		return nil, err
 	}
+	// Treat non-fatal errors as fatal here.
+	if len(nfe.Errors) > 0 {
+		return nil, nfe.Errors[0]
+	}
 
 	var subject pkix.RDNSequence
 	if rest, err := asn1.Unmarshal(in.TBSCSR.Subject.FullBytes, &subject); err != nil {
@@ -2822,7 +3036,6 @@ func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error
 		return nil, err
 	}
 
-	var nfe NonFatalErrors
 	for _, extension := range out.Extensions {
 		if extension.Id.Equal(OIDExtensionSubjectAltName) {
 			out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(extension.Value, &nfe)